input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>config_interface/Config_cli.py
# MIT License
#
# Copyright (c) 2022 <NAME> [srccircumflex]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import Literal, TextIO
from sys import argv, platform
from re import compile, search
from os import path, access, environ
from getpass import getpass
from ast import literal_eval
from subprocess import Popen, PIPE, TimeoutExpired
from sec.fscIO import pph
from sec.fscModule import FStreamCipher
from sec.vTools import mk_login
from config_interface._rc._run import attr_info_rev_file, gui_info_file
from config_interface.sec.doc_reader import get_all_keys, gen_infolines, get_sheet_json
from sec.fTools import print_cli_format
from _rc import configurations as CNF, _run
from sec.fTools import DefIfEndIf, LineFinder
from config_interface import msg_strings, ROOT_DIR
# Keys of a per-attribute config entry (see _KW_BASE / __value_input).
KEY_PATTERN = "PATTERN"
KEY_DEFAULT = "DEFAULT"
KEY_VALUE = "VALUE"
KEY_CAST = "CAST"
KEY_LIEV = "LIEV"
# Regexes are raw strings: "\S" / "\d" inside plain string literals are
# invalid escape sequences (DeprecationWarning today, SyntaxError in a
# future CPython). The compiled patterns are byte-identical to before.
KW_PATTERN_S = {KEY_PATTERN: compile(r"\S+")}                               # any non-blank value
KW_PATTERN_GT9 = {KEY_PATTERN: compile(r"^[1-9]\d+$")}                      # integer > 9 (two or more digits)
KW_PATTERN_FSC = {KEY_PATTERN: compile(r"^[1-9]\d{0,2}[+-][1-9]\.[1-9]$")}  # FSC pepper spec, e.g. "100+1.1"
KW_PATTERN_XFF = {KEY_PATTERN: compile(r"%s[^/\\]*$")}                      # path whose basename contains a literal %s placeholder
KW_CAST_INT = {KEY_CAST: int}
KW_CAST_PATH = {KEY_CAST: path.realpath}
KW_CAST_COD = {KEY_CAST: lambda c: c.encode(c).decode(c)}  # round-trip validates that c names a known codec
#KW_LIEV_NONE = {KEY_LIEV: lambda _s: {"none": None}[_s.lower()]}
#KW_LIEV_BOOL = {KEY_LIEV: lambda _s: {"false": False, "true": True}[_s.lower()]}
#KW_LIEV_BOOLNONE = {KEY_LIEV: lambda _s: {"false": False, "true": True, "none": None}[_s.lower()]}
_KW_BASE = {
    # catch-all fallbacks merged under every entry by _value_interface
    KEY_PATTERN: compile("."),
    KEY_VALUE: None,
    KEY_CAST: str
}
def __getattr(attr: str):
    """Read the CNF attribute named by *attr*, ignoring any '* ' marker."""
    return getattr(CNF, attr.replace('* ', ''))

def __setattr(item: tuple):
    """Store item's VALUE on CNF under the marker-stripped attribute name."""
    key, entry = item
    return setattr(CNF, key.replace('* ', ''), entry[KEY_VALUE])

def __get_conf_item(attr, val='<None>'):
    """Build an (attr, {VALUE: ...}) pair; the '<None>' sentinel means 'read CNF'."""
    value = __getattr(attr) if val == '<None>' else val
    return attr, {KEY_VALUE: value}

def __setdefault_attr(item: tuple):
    """Apply *item* to CNF only when the attribute is still unset (None)."""
    if __getattr(item[0]) is None:
        __setattr(item)

def __setconf(config: dict):
    """Push every (attr, entry) pair of *config* onto CNF."""
    for entry in config.items():
        __setattr(entry)

def __setdefault_conf(config: dict):
    """Like __setconf, but never overwrites an already-set CNF attribute."""
    for entry in config.items():
        __setdefault_attr(entry)

def __replace42(attr: str) -> str:
    """Strip the '* ' required-field marker from an attribute name."""
    return attr.replace('* ', '')

def __add42(attr: str) -> str:
    """Mark an attribute name as required by prefixing '* '."""
    return "* " + attr
def __fsc_tuple_to_str(_ttuple: tuple[int, tuple[int, int], int]) -> str:
    """Render an FSC pepper tuple as '<n><sign><a>.<b>' (sign flag: 1 -> '+', 0 -> '-')."""
    head, (sign_flag, mid), tail = _ttuple
    sign = {0: '-', 1: '+'}[sign_flag]
    return "%d%s%d.%d" % (head, sign, mid, tail)
# Attribute name left-padded to 22 columns, then '= <value>'.
CONFIG_LINE_FORM = "%-22s= %s"

def __config_file_ln_format(item: tuple) -> str:
    """Format one (attr, entry) pair as a config-file line; str values are repr'd."""
    attr, entry = item
    value = entry[KEY_VALUE]
    if isinstance(value, str):
        value = repr(value)
    return CONFIG_LINE_FORM % (__replace42(attr), value)

def _write_config_ln(io: TextIO, item: tuple):
    """Append one formatted, newline-terminated config line to *io*."""
    io.write(__config_file_ln_format(item) + '\n')
TRUE_PATTERNS = ("y", "Y", "yes", "Yes", "1", "ok")
YESNO_PATTERNS = "^(y|n|Y|N|yes|no|Yes|No|1|0|ok|x%s)$"

def __yesno(question: str,
            default: Literal["y", "n", "Y", "N", "yes", "no", "Yes", "No", "1", "0", "ok", "x"] = "yes",
            alternate: str = None,
            alternate_default: bool = False) -> "bool | None":
    """Ask a yes/no question on stdin and return True/False.

    An optional *alternate* answer is accepted as a third state and returned
    as None; with *alternate_default* it also becomes the default shown in
    the prompt. Empty input falls back to *default*; input not matching the
    pattern re-prompts.
    """
    if alternate:
        # BUG FIX: '%' and '+' have equal precedence (left-assoc), so the old
        # `YESNO_PATTERNS % "|" + alternate` built "(...|x|)$<alternate>":
        # the empty alternative "x|)" matched every input and the alternate
        # text dangled unreachably after '$'. Parenthesize the substitution.
        pattern = compile(YESNO_PATTERNS % ("|" + alternate))
        if alternate_default:
            default = alternate
    else:
        pattern = compile(YESNO_PATTERNS % "")

    def value() -> "bool | None":
        # Map the accepted raw answer onto True / False / None (alternate).
        if i in TRUE_PATTERNS:
            return True
        elif i == alternate:
            return None
        return False

    print(question, end="")
    while not search(pattern, (i := input("[%s]: " % default).strip())):
        if not i and default:
            # empty input: adopt the default answer
            i = default
            return value()
        print("!<Wrong Pattern> : Pattern=%s" % pattern.pattern)
        print(question, end="")
    return value()
def __value_input(attr: str, config: dict):
    """Prompt for one configuration value and store it in config[VALUE].

    Input is validated against config[PATTERN], translated via config[LIEV]
    (literal keywords) when present, otherwise converted with config[CAST].
    Attributes whose name starts with '* ' are mandatory: the prompt repeats
    until value() reports success (truthy return).
    """
    default = config.get(KEY_DEFAULT)
    pattern = config.get(KEY_PATTERN)
    cast = config.get(KEY_CAST)
    liev = config.get(KEY_LIEV)
    def value():
        # Returns True when a value was stored; a falsy return re-prompts.
        i = input("%s[%s]: " % (attr, default)).strip()
        if attr.startswith('* ') and (not i and default is None):
            # Mandatory field, empty input, nothing to fall back on:
            # only acceptable if the pattern tolerates the empty string.
            if not search(pattern, str(i)):
                return print("!<Wrong Pattern> : Pattern=%s" % pattern.pattern)
        if not i:
            # Empty input -> adopt the default value.
            try:
                _default = config[KEY_DEFAULT]
                if isinstance(_default, (bool, tuple, list, set)):
                    # container/bool defaults are stored as-is, never cast
                    config[KEY_VALUE] = _default
                else:
                    config[KEY_VALUE] = cast(_default)
            except KeyError:
                # no DEFAULT entry at all -> store an explicit None
                config[KEY_VALUE] = None
            return True
        if liev:
            # literal keyword translation (e.g. "none"/"true"); KeyError
            # means the input is no keyword and normal parsing continues
            try:
                config[KEY_VALUE] = liev(str(i))
                return True
            except KeyError:
                pass
        if search(pattern, str(i)):
            config[KEY_VALUE] = cast(str(i))
            return True
        else:
            return print("!<Wrong Pattern> : Pattern=%s" % pattern.pattern)
    if attr.startswith('* '):
        # mandatory attribute: loop until a value was accepted
        while not value():
            pass
    else:
        value()
def _value_interface(configs: dict[str, dict]):
    """Prompt for a value for every entry of *configs* (modified in place).

    Each entry is completed with the _KW_BASE fallbacks and, when CNF already
    holds a value for the attribute, that value as the prompt default.
    Afterwards the '* ' required-marker is stripped from the key.
    """
    for attr in tuple(configs):
        entry = dict(_KW_BASE)
        entry.update(configs[attr])
        configs[attr] = entry
        current = __getattr(attr)
        if current:
            entry.setdefault(KEY_DEFAULT, current)
        __value_input(attr, entry)
        configs[__replace42(attr)] = configs.pop(attr)
def _save_configs(configs: dict, _suffix: str = ""):
    """Persist *configs*: edit the level-1 run-control file in place when it
    already contains matching lines (after user confirmation), otherwise
    write a standalone "<USER>.config<_suffix>.txt".

    NOTE(review): RC_FILE_LV1 is not defined anywhere in this chunk --
    presumably a module global set elsewhere in the file; confirm before
    relying on the first branch.
    """
    _configs = configs.copy()
    if RC_FILE_LV1:
        line_finder = LineFinder(RC_FILE_LV1, configs)
        if line_finder.present_lines:
            # show the user which lines already exist before overwriting
            print(msg_strings.SEPARATOR64)
            line_finder.print_present()
            print(msg_strings.SEPARATOR64)
            if __yesno(msg_strings.Q_OVERWRITE % f"{RC_FILE_LV1=}"):
                for m in line_finder.present_lines:
                    # m holds (line index, ..., attribute name) -- assumed
                    # from the m[0]/m[2] usage; verify against LineFinder
                    if _configs.get(m[2]):
                        line = __config_file_ln_format((m[2], _configs[m[2]]))
                        _configs.pop(m[2])
                    else:
                        line = ""  # attribute has no replacement: blank the line
                    line_finder.overwrite_line_(m[0], line)
                for item in _configs.items():
                    # attributes not yet present in the file
                    line_finder.insert_remaining(__config_file_ln_format(item))
                line_finder.write_out()
                return print(msg_strings.FILE_CREATED % RC_FILE_LV1)
    # fallback: write a fresh per-user config file
    _file = "%s.config%s.txt" % (CNF.USER, _suffix)
    with open(_file, "w") as file:
        file.write(msg_strings.SEPARATOR64 + "\n")
        for item in configs.items():
            _write_config_ln(file, item)
        file.write(msg_strings.SEPARATOR64 + "\n")
    print(msg_strings.FILE_CREATED % _file)
def _print_config(configs: dict):
    """Dump every config line to stdout, framed by separator rules."""
    print(msg_strings.SEPARATOR64)
    for entry in configs.items():
        print(__config_file_ln_format(entry))
    print(msg_strings.SEPARATOR64)
def _mail_server_config() -> dict:
    """Interactively collect the SMTP server settings ('* ' keys are mandatory)."""
    required = (
        ('MAIL_SMTP_ADDR', KW_PATTERN_S),
        ('MAIL_SMTP_PORT', KW_PATTERN_GT9 | KW_CAST_INT),
        ('MAIL_CRYPT', {KEY_PATTERN: compile("^(ssl|SSL|tls|TLS)$")}),
        ('MAIL_SENDER_MAIL', KW_PATTERN_S),
    )
    optional = (
        ('MAIL_USER', KW_PATTERN_S),
        ('MAIL_RECEIVER_MAIL', KW_PATTERN_S),
        ('MAIL_BCC', KW_PATTERN_S),
    )
    configs = {__add42(name): kw for name, kw in required}
    configs.update(optional)
    _value_interface(configs)
    return configs
def _mail_keyfile() -> dict:
    """Collect the mail key-file settings and write the encrypted key file.

    Prompts for the FSC pepper spec and the key-file path template, then
    encrypts the interactively entered mail-account password into that file.
    Returns the collected config entries (including the seed, which is NOT
    written to disk).
    """
    configs = {
        __add42('MAIL_FSC'): KW_PATTERN_FSC | {KEY_CAST: _run._parsbpp,
                                               KEY_DEFAULT: __fsc_tuple_to_str(CNF.BASIC_FSC_PEPPER)},
        __add42('MAIL_XF'): KW_PATTERN_XFF | KW_CAST_PATH
    }
    _value_interface(configs)
    # seed is read via pph (hidden prompt), never echoed or persisted as text
    configs |= {
        'MAIL_XFSEED': {KEY_VALUE: pph('! MAIL_XFSEED[None]: ')}
    }
    # MAIL_XF is a path template containing a literal %s -> expand with USER
    with open(configs['MAIL_XF'][KEY_VALUE] % CNF.USER, "wb") as f:
        f.write(
            FStreamCipher(
                getpass('! MAIL_ACCOUNT_PASSWORD[None]: ').encode(CNF.LOC_ENC),
                configs['MAIL_XFSEED'][KEY_VALUE],
                *configs['MAIL_FSC'][KEY_VALUE]
            ).encrypt())
    print(msg_strings.FILE_CREATED % configs['MAIL_XF'][KEY_VALUE] % CNF.USER)
    return configs
def _mail_test(config: dict = None):
    """Send a test mail through the (possibly just configured) alert setup.

    Missing or invalid settings are re-collected interactively; exceptions
    raised by the final send are mapped onto user-readable messages.
    """
    # local re-import shadows the module-level CNF on purpose; HOST_SIDE
    # switches the run-control into host mode before _run is (re)loaded
    from _rc import configurations as CNF
    CNF.HOST_SIDE = True
    from _rc import _run
    if config:
        __setconf(config)
    else:
        config = {}
    try:
        _run.run()
        if _run.configurations.MAIL_CRYPT not in ('ssl', 'tls', 'SSL', 'TLS'):
            # unsupported crypt setting is treated like a config error
            raise _run.MailValueError(ValueError())
    except _run.MailValueError:
        # configuration incomplete/invalid -> collect server settings now
        config = _mail_server_config()
        __setconf(config)
        _run.run()
    if not _run._mailalert_defif():
        # no mail key file configured yet -> create one
        config |= _mail_keyfile()
        __setconf(config)
    from _rc import _OBJrc
    hasattr(_OBJrc, "__file__")  # touches the module; result intentionally unused
    from sec.Loggers import MailAlert
    try:
        MailAlert().write(msg_strings.MAIL_ALERT_TEST + f"<{__file__}>\n\nEOF")
    except Exception as e:
        # known mail errors get a specific message, anything else the generic one
        if type(e) in msg_strings.MAIL_EXCEPTION:
            print(msg_strings.MAIL_EXCEPTION[type(e)])
        else:
            print(msg_strings.MAIL_EXCEPTION[None])
def make_mail_alert(server_config: bool = True, key_file: bool = True):
    """Walk through the mail-alert setup: collect server settings and/or the
    key file, optionally save everything and send a test mail."""
    configs = dict()
    if server_config:
        configs.update(_mail_server_config())
    if key_file:
        configs.update(_mail_keyfile())
    if not configs:
        return
    _print_config(configs)
    if __yesno(msg_strings.Q_SAVE):
        _save_configs(configs, "-MailAlert")
    if __yesno(msg_strings.Q_TEST_MAIL, "No"):
        _mail_test(configs)
def _save_userline(userline: str):
    """Write *userline* into the board-users file.

    Three cases: the DEFIF section exists and already has a line for this
    user (ask before replacing), the section exists without one (insert
    before the ENDIF marker), or the file has no section yet (create it).
    """
    BOARD_USERS = CNF.BOARD_USERS
    # IFDEFENDIF['BOARD_USERS'] is assumed to be a (begin-marker, end-marker)
    # byte pair delimiting the managed section -- verify against DefIfEndIf
    defif = DefIfEndIf(BOARD_USERS, *CNF.IFDEFENDIF['BOARD_USERS'])
    if defif.configured:
        if (line_finder := LineFinder(BOARD_USERS, (CNF.USER,))).present_lines:
            # a line for this user already exists: show it, then ask
            print(msg_strings.SEPARATOR64)
            line_finder.print_present()
            print(msg_strings.SEPARATOR64)
            if __yesno(msg_strings.Q_OVERWRITE % f"{BOARD_USERS=}"):
                line_finder.final_insert(userline).write_out()
            else:
                # keep the old line and append the new one after it
                line_finder.insert_after(userline).write_out()
            return print(msg_strings.FILE_CREATED % BOARD_USERS)
        # section exists but no line for this user: insert before the end marker
        LineFinder(BOARD_USERS, (CNF.IFDEFENDIF['BOARD_USERS'][1].decode(),)).insert_before(userline).write_out()
        return print(msg_strings.FILE_CREATED % BOARD_USERS)
    # no managed section at all: create file content with begin/line/end
    with open(BOARD_USERS, "wb") as f:
        f.write(
            b'\n%s\n%s\n%s' % (CNF.IFDEFENDIF['BOARD_USERS'][0], userline.encode(), CNF.IFDEFENDIF['BOARD_USERS'][1])
        )
    print(msg_strings.FILE_CREATED % BOARD_USERS)
def _login_files_config() -> dict:
    """Interactively collect the login/key-file locations (all mandatory)."""
    pepper_kw = KW_PATTERN_FSC | {
        KEY_CAST: _run._parsbpp,
        KEY_DEFAULT: __fsc_tuple_to_str(CNF.BASIC_FSC_PEPPER),
    }
    fields = (
        ('LOC_ENC', KW_CAST_COD),
        ('FSC_HOST_SPICE_FILE', KW_PATTERN_XFF | KW_CAST_PATH),
        ('FSC_HOST_XF', KW_PATTERN_XFF | KW_CAST_PATH),
        ('FSC_HOST_TABLE_FILE', KW_PATTERN_XFF | KW_CAST_PATH),
        ('FSC_PEPPER_HOST', KW_PATTERN_XFF | KW_CAST_PATH),
        ('BASIC_FSC_PEPPER', pepper_kw),
    )
    configs = {__add42(name): kw for name, kw in fields}
    _value_interface(configs)
    return configs
def _login(configs: dict = None):
    """Create the login key files for CNF.USER.

    Builds CNF.AUTH_CONF from *configs* (fresh interactive values) or, when
    None, from the attributes already present on CNF; the %s placeholder in
    each path is expanded with the user name ('bpp' is the pepper tuple, not
    a path). Finally mk_login writes the files using a hidden passphrase.
    """
    _authkey_attr = {
        'lin': 'FSC_HOST_XF',
        'hst': 'FSC_HOST_TABLE_FILE',
        'spc': 'FSC_HOST_SPICE_FILE',
        'ppp': 'FSC_PEPPER_HOST',
        'bpp': 'BASIC_FSC_PEPPER'
    }
    bpp = 'bpp'
    if configs:
        CNF.AUTH_CONF = {CNF.USER: {key: configs[attr][KEY_VALUE] % CNF.USER
                                    for key, attr in _authkey_attr.items()
                                    if key != bpp}}
        CNF.AUTH_CONF[CNF.USER][bpp] = configs[_authkey_attr[bpp]][KEY_VALUE]
        __setattr(('LOC_ENC', configs['LOC_ENC']))
    else:
        CNF.AUTH_CONF = {CNF.USER: {key: __getattr(attr) % CNF.USER
                                    for key, attr in _authkey_attr.items()
                                    if key != bpp}}
        CNF.AUTH_CONF[CNF.USER][bpp] = __getattr(_authkey_attr[bpp])
    mk_login(
        CNF.USER,
        pph('! PASSPHRASE[None]: ')
    )
    # Idiom fix: was a list comprehension executed purely for its print side
    # effects; a plain loop states the intent and allocates nothing.
    for file in CNF.AUTH_CONF[CNF.USER]:
        if file != bpp:
            print(msg_strings.FILE_CREATED % CNF.AUTH_CONF[CNF.USER][file])
def make_login_files(file_config: bool = True, login: bool = True):
    """Top-level flow: collect file locations, create the login files and
    offer to store the resulting board-users line.
    """
    configs = None
    if file_config:
        configs = _login_files_config()
        _print_config(configs)
        if __yesno(msg_strings.Q_SAVE, "No"):
            _save_configs(configs, "-Login")
    if login:
        _login(configs)
        BASIC_FSC_PEPPER = CNF.AUTH_CONF[CNF.USER]['bpp']
        userline = "%s bpp:%s" % (CNF.USER, __fsc_tuple_to_str(BASIC_FSC_PEPPER))
        BOARD_USERS = CNF.BOARD_USERS
        # NOTE: the f"{VAR=}" rebinds below turn the variables into display
        # strings like "BOARD_USERS='...'" for the messages that follow --
        # the variable NAMES are part of the printed output, do not rename.
        BASIC_FSC_PEPPER = f"{BASIC_FSC_PEPPER=}"
        BOARD_USERS = f"{BOARD_USERS=}"
        print('\n\t', userline, '\n')
        print(msg_strings.TOADD_USERLINE % BOARD_USERS)
        if __yesno(msg_strings.Q_SAVE_USERLINE % BOARD_USERS, "No"):
            _save_userline(userline)
        print(msg_strings.SEPARATOR64)
        print(msg_strings.CLIENT_PEPPERS % BASIC_FSC_PEPPER)
        print(msg_strings.SEPARATOR64)
    print(msg_strings.PAIRING_INTR % (CNF.USER, CNF.USER))
def _openssl_cmd() -> tuple[str, int]:
    """Locate the openssl binary.

    Returns (command, status) with status 0 when found on $PATH, 1 when
    falling back to /usr/bin/openssl, 2 on non-Linux platforms (where the
    bare name is left for the shell to resolve).
    """
    if platform != "linux":
        return "openssl", 2
    for directory in environ["PATH"].split(':'):
        candidate = directory + "/openssl"
        if access(candidate, 1):  # 1 == os.X_OK (executable)
            return candidate, 0
    return "/usr/bin/openssl", 1
def _openssl_exec(cmd) -> int:
    """Run *cmd* through the shell, relay its output and return the exit code.

    The child gets 3 seconds; a silent/hung invocation is treated as a
    best-effort success with empty output (the original behavior), but the
    process is now reaped instead of being left running.
    """
    _sp = Popen([cmd],
                shell=True,
                stdin=PIPE,  # was the magic constant -1 (== subprocess.PIPE)
                stderr=PIPE,
                stdout=PIPE
                )
    try:
        stdo, stde = _sp.communicate(timeout=3)
    except TimeoutExpired:
        # BUG FIX: the child kept running after the timeout. Per the
        # subprocess docs, kill it and communicate again to reap it.
        _sp.kill()
        _sp.communicate()
        stdo, stde = b"\n", b""
    print(stdo.decode(), end="")
    print(stde.decode(), end="")
    print(f"\n+++ exit {_sp.returncode} +++\n\n")
    return _sp.returncode
def make_openssl():
configs = {
__add42('SSL_CERT_FILE'): KW_CAST_PATH,
__add42('SSL_KEY_FILE'): KW_CAST_PATH
}
_value_interface(configs)
_cnf = "%s/%s" % (ROOT_DIR, "_rc/doc/openssl.cnf")
if not (cnf := input('* openssl.cnf[%s]: ' % _cnf)):
cnf = _cnf
cnf = path.realpath(cnf)
passwd = msg_strings.SSL_NODES
if __yesno(msg_strings.Q_SSL_ENCRYP, "No"):
configs |= {'SSL_PASSWORD': {KEY_VALUE: pph("! PASSPHRASE[None]: ")}}
passwd = msg_strings.SSL_PASS_OUT % configs['SSL_PASSWORD'][KEY_VALUE]
_print_config(configs)
| |
users where the re-added role was specified in the configuration of the external user directory.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Role_New = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.New',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL not allow any new roles to be assigned to any LDAP\n'
'users authenticated using external user directory unless the role is specified\n'
'in the configuration of the external user directory.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NewPrivilege = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NewPrivilege',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL add new privilege to all the LDAP users authenticated using external user directory\n'
'including cached users when new privilege is added to one of the roles specified\n'
'in the configuration of the external user directory.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Role_RemovedPrivilege = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.RemovedPrivilege',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL remove privilege from all the LDAP users authenticated using external user directory\n'
'including cached users when privilege is removed from all the roles specified\n'
'in the configuration of the external user directory.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Invalid',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Definition = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Definition',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support using the [LDAP] servers defined in the\n'
'`ldap_servers` section of the `config.xml` as the server to be used\n'
'for a external user directory that uses an [LDAP] server as a source of user definitions.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Name = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Name',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL not support empty string as a server name.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Host = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Host',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<host>` parameter to specify [LDAP]\n'
'server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<port>` parameter to specify [LDAP] server port.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port.Default',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Prefix = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Prefix',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<auth_dn_prefix>` parameter to specify the prefix\n'
'of value used to construct the DN to bound to during authentication via [LDAP] server.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Suffix = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Suffix',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<auth_dn_suffix>` parameter to specify the suffix\n'
'of value used to construct the DN to bound to during authentication via [LDAP] server.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Value = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Value',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n'
'\n'
"> This implies that auth_dn_suffix should usually have comma ',' as its first non-space character.\n"
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<enable_tls>` parameter to trigger the use of secure connection to the [LDAP] server.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Default = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Default',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL use `yes` value as the default for `<enable_tls>` parameter\n'
'to enable SSL/TLS `ldaps://` protocol.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_No = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.No',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `no` as the value of `<enable_tls>` parameter to enable\n'
'plain text `ldap://` protocol.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Yes = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Yes',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `yes` as the value of `<enable_tls>` parameter to enable\n'
'SSL/TLS `ldaps://` protocol.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_StartTLS = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.StartTLS',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `starttls` as the value of `<enable_tls>` parameter to enable\n'
'legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS].\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<tls_minimum_protocol_version>` parameter to specify\n'
'the minimum protocol version of SSL/TLS.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Values = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Values',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2`\n'
'as a value of the `<tls_minimum_protocol_version>` parameter.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Default = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Default',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL set `tls1.2` as the default value of the `<tls_minimum_protocol_version>` parameter.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<tls_require_cert>` parameter to specify [TLS] peer\n'
'certificate verification behavior.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Default = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Default',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL use `demand` value as the default for the `<tls_require_cert>` parameter.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Demand = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Demand',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `demand` as the value of `<tls_require_cert>` parameter to\n'
'enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n'
'provided, the session SHALL be immediately terminated.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Allow = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Allow',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `allow` as the value of `<tls_require_cert>` parameter to\n'
'enable requesting of client certificate. If no\n'
'certificate is provided, the session SHALL proceed normally.\n'
'If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Try = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Try',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `try` as the value of `<tls_require_cert>` parameter to\n'
'enable requesting of client certificate. If no certificate is provided, the session\n'
'SHALL proceed normally. If a bad certificate is provided, the session SHALL be\n'
'immediately terminated.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Never = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Never',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support specifying `never` as the value of `<tls_require_cert>` parameter to\n'
'disable requesting of client certificate.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCertFile = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCertFile',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<tls_cert_file>` to specify the path to certificate file used by\n'
'[ClickHouse] to establish connection with the [LDAP] server.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSKeyFile = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSKeyFile',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<tls_key_file>` to specify the path to key file for the certificate\n'
'specified by the `<tls_cert_file>` parameter.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertDir = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<tls_ca_cert_dir>` parameter to specify to a path to\n'
'the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertFile = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<tls_ca_cert_file>` parameter to specify a path to a specific\n'
'[CA] certificate file used to verify certificates provided by the [LDAP] server.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCipherSuite = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites.\n'
'The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers].\n'
'\n'
'For example,\n'
'\n'
'```xml\n'
'<tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>\n'
'```\n'
'\n'
'The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n'
'[ClickHouse] and therefore might change.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml`\n'
'configuration file or of any configuration file inside the `config.d` directory.\n'
'\n'
'```xml\n'
'<yandex>\n'
' <my_ldap_server>\n'
' <host>localhost</host>\n'
' <port>636</port>\n'
' <auth_dn_prefix>cn=</auth_dn_prefix>\n'
' <auth_dn_suffix>, ou=users, dc=example, dc=com</auth_dn_suffix>\n'
' <enable_tls>yes</enable_tls>\n'
' <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>\n'
' <tls_require_cert>demand</tls_require_cert>\n'
' <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>\n'
' <tls_key_file>/path/to/tls_key_file</tls_key_file>\n'
' <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>\n'
' <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>\n'
' <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>\n'
' </my_ldap_server>\n'
'</yandex>\n'
'```\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `<ldap>` sub-section in the `<user_directories>` section of the `config.xml`\n'
'that SHALL define a external user directory that uses an [LDAP] server as a source of user definitions.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_MoreThanOne = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne',
version='2.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support more than one `<ldap>` sub-sections in the `<user_directories>` section of the `config.xml`\n'
'that SHALL allow to define more than one external user directory that use an [LDAP] server as a source\n'
'of user definitions.\n'
'\n'
),
link=None)
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Syntax = Requirement(
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax',
| |
<reponame>fameshpatel/olfactorybulb<gh_stars>1-10
import os
import sys
import numpy
import numpy.random as rnd
from scipy.spatial.distance import euclidean
class CreateHyperAndMinicolumns(object):
def __init__(self, param_dict):
self.params = param_dict
self.folder_name = self.params['folder_name']
self.n_hc = self.params['n_hc']
self.n_mc = self.params['n_mc']
self.n_pyr = self.n_hc * self.n_mc * self.params['n_pyr_per_mc']
self.n_pyr_per_mc = self.params['n_pyr_per_mc']
self.n_rsnp_per_mc = self.params['n_rsnp_per_mc']
self.n_rsnp = self.params['n_rsnp']
self.n_basket_per_hc = self.params['n_basket_per_hc']
self.n_basket = self.params['n_basket']
self.n_rsnp = self.params['n_rsnp']
self.p_rsnp_pyr = self.params["p_rsnp_pyr"]
self.w_rsnp_pyr = self.params["w_rsnp_pyr"]
self.p_pyr_pyr_local = self.params["p_pyr_pyr_local"]
self.w_pyr_pyr_local = self.params["w_pyr_pyr_local"]
self.p_pyr_pyr_global = self.params["p_pyr_pyr_global"]
self.p_pyr_basket = self.params["p_pyr_basket"]
self.w_pyr_basket = self.params["w_pyr_basket"]
self.p_basket_pyr = self.params["p_basket_pyr"]
self.w_basket_pyr = self.params["w_basket_pyr"]
self.p_pyr_rsnp = self.params["p_pyr_rsnp"]
self.p_basket_basket = self.params["p_basket_basket"]
self.w_basket_basket = self.params["w_basket_basket"]
rnd.seed(self.params['seed_connections'])
self.output_fn = self.params['conn_list_layer23']
def get_closest_basket_cell_ids(self, mc):
"""
mc = minicolumn index of the pyramidal cell
returns the a list of gids of the n_tgt_basket_per_mc closest basket cells to which the pyr cells in the given minicolum shall connect to
list of gids is without the offset
"""
n_mc = self.params['n_mc'] * self.params['n_hc']
pyr_grid_x = int(numpy.sqrt(self.params['n_mc']))
if (numpy.sqrt(self.params['n_mc'] / int(numpy.sqrt(self.params['n_mc'])) != 1.0)):
# if self.params['n_mc'] can not be put on a rectangular grid
pyr_grid_y = int(numpy.sqrt(self.params['n_mc'])) + 1
else:
pyr_grid_y = int(numpy.sqrt(self.params['n_mc']))
# x, y positions of the minicolumn in the HC grid
pyr_pos_x = mc % pyr_grid_x
pyr_pos_y = mc / pyr_grid_x
basket_grid_x = int(numpy.sqrt(self.params['n_basket_per_hc']))
if ((numpy.sqrt(self.params['n_basket_per_hc']) / int(numpy.sqrt(self.params['n_basket_per_hc']))) != 1.0):
# if self.params['n_basket_per_hc'] can not be put on a rectangular grid
basket_grid_y = int(round(numpy.sqrt(self.params['n_basket_per_hc'])) + 1)
else:
basket_grid_y = int(numpy.sqrt(self.params['n_basket_per_hc']))
distances_to_basket_cells = numpy.zeros(self.params['n_basket_per_hc'])
for basket_id in xrange(self.params['n_basket_per_hc']):
x = basket_id % basket_grid_x
y = basket_id / basket_grid_x
# for y in xrange(basket_grid_y):
# for x in xrange(basket_grid_x):
scaled_basket_pos_x = (float(x) / basket_grid_x) * pyr_grid_x
scaled_basket_pos_y = (float(y) / basket_grid_y) * pyr_grid_y
# basket_id = y * basket_grid_x + x
# print "basket_id", basket_id, basket_grid_y, basket_grid_x, (numpy.sqrt(self.params['n_basket_per_hc']) / int(numpy.sqrt(self.params['n_basket_per_hc'])) != 1.0)
# print "if:", (numpy.sqrt(self.params['n_basket_per_hc']) / int(numpy.sqrt(self.params['n_basket_per_hc']))) != 1.0
# print "check:", numpy.sqrt(self.params['n_basket_per_hc']) / int(numpy.sqrt(self.params['n_basket_per_hc']))
# print "basket_grid_y", numpy.sqrt(self.params['n_basket_per_hc'] / int(numpy.sqrt(self.params['n_basket_per_hc']))), (numpy.sqrt(self.params['n_basket_per_hc'] / int(numpy.sqrt(self.params['n_basket_per_hc']))) != 1.0)
# print "debug basket_id %d x %f y %f distance to mc %d: %f" % (basket_id, scaled_basket_pos_x, scaled_basket_pos_y, mc, distances_to_basket_cells[basket_id])
# print "debug pyr_pos x %d y %d" % (pyr_pos_x, pyr_pos_y)
# print "d", distances_to_basket_cells.size, self.params['n_basket_per_hc']
distances_to_basket_cells[basket_id] = euclidean([pyr_pos_x, pyr_pos_y], [scaled_basket_pos_x, scaled_basket_pos_y])
sorted_ids = distances_to_basket_cells.argsort()
closest_basket_cells = numpy.take(sorted_ids, range(self.params['n_tgt_basket_per_mc']))
closest_basket_cells.sort()
return closest_basket_cells
    def create_connections(self):
        """
        Draw the layer-2/3 connectivity (pyr->pyr, rsnp->pyr, basket<->pyr,
        basket->basket) and write it to self.output_fn in NEURON list format:
        a header line "<n_connections> 3" followed by one
        "src\\ttgt\\tweight" line per connection.

        Weights are drawn via self.draw_connection (defined outside this
        excerpt; presumably returns 0 with probability 1-p, otherwise a noisy
        weight -- TODO confirm).  Draws happen in a fixed order, so the result
        is reproducible for a given seed_connections.
        """
        my_min_pyr = 0
        my_min_rsnp = 0
        my_min_basket = 0
        my_max_pyr = self.n_hc * self.n_mc * self.n_pyr_per_mc
        my_max_rsnp = self.n_hc * self.n_mc * self.n_rsnp_per_mc
        my_max_basket = self.n_hc * self.n_basket_per_hc
        lines = ""
        n_connections = 0 # needed for NEURON output file
        # within one MC:
        # 1) pyr - pyr: 25%, 1.2
        # 2) rsnp - pyr: 70%, -0.8
        # within one HC:
        # 3) pyr - basket: 70%,, 1.9
        # 4) basket - basket: 70%, -2.2
        # from one HC to another:
        # 5) pyr - rsnp: 30%, 0.2 if in different patterns
        # 6) pyr - pyr: 30%, 0.4 if in the same patterns
        # iterate over source pyramidal cells
        for pyr in xrange(my_min_pyr, my_max_pyr):
            # 1) - 4) within same HC
            # 1 PYR -> PYR: same HC, same MC
            # coordinates of source pyr (Py2 integer division)
            mc_pyr = (pyr / self.n_pyr_per_mc) % self.n_mc # mc_pyr = 0 .. n_mc
            hc_pyr = pyr / (self.n_pyr_per_mc * self.n_mc)
            # min/max ids of possible target pyr cells
            min_pyr = mc_pyr * self.n_pyr_per_mc + hc_pyr * self.n_mc * self.n_pyr_per_mc
            max_pyr = (mc_pyr + 1) * self.n_pyr_per_mc + hc_pyr * self.n_mc * self.n_pyr_per_mc
            for tgt_pyr in xrange(min_pyr, max_pyr):
                if (pyr != tgt_pyr):
                    w = self.draw_connection(self.p_pyr_pyr_local, self.w_pyr_pyr_local, noise=self.params["w_pyr_pyr_local_sigma"])
                    # weight_threshold prunes connections too weak to matter
                    if ((w != 0) and (w > self.params['weight_threshold'])):
                        lines += "%d\t%d\t%.6e\n" % (pyr + self.params['pyr_offset'], tgt_pyr + self.params['pyr_offset'], w)
                        n_connections += 1
            # 2 RSNP -> PYR: within same minicolumn
            min_rsnp = mc_pyr * self.n_rsnp_per_mc
            max_rsnp = (mc_pyr + 1) * self.n_rsnp_per_mc
            # for rsnp in xrange(min_rsnp, max_rsnp): # global loop for inter hypercolumnar connections
            for rsnp in xrange(0, self.n_rsnp_per_mc): # global loop for inter hypercolumnar connections
                # mc_rsnp = rsnp / self.n_rsnp_per_mc
                # hc_rsnp = rsnp / (self.n_rsnp_per_mc * self.n_mc)
                w = self.draw_connection(self.p_rsnp_pyr, self.w_rsnp_pyr, noise=self.params["w_rsnp_pyr_sigma"])
                if ((w != 0) and (w > self.params['weight_threshold'])):
                    # rsnp gids are local; add the rsnp gid offset for this MC/HC
                    src_id = rsnp + self.params['rsnp_offset'] + mc_pyr * self.n_rsnp_per_mc + hc_pyr * self.n_mc * self.n_rsnp_per_mc
                    # rsnp -> pyr is inhibitory, hence the negated weight
                    lines += "%d\t%d\t%.6e\n" % (src_id, pyr + self.params['pyr_offset'] , (-1.0) * w)
                    n_connections += 1
            # pyr within one minicolumn connect to the 8 'closest' basket cells
            min_basket = self.n_hc * self.params['n_basket_per_hc'] # within this hypercolumn
            max_basket = (self.n_hc + 1) * self.params['n_basket_per_hc']
            # for basket in xrange(min_basket, max_basket):
            # 3) BASKET -> PYR (inhibitory; drawn for every basket cell in the HC)
            basket_gid_offset = self.params['basket_offset'] + hc_pyr * self.params['n_basket_per_hc']
            for basket in xrange(0, self.params['n_basket_per_hc']):
                w = self.draw_connection(self.p_basket_pyr, self.w_basket_pyr, noise=self.params["w_basket_pyr_sigma"])
                if ((w != 0) and (w > self.params['weight_threshold'])):
                    lines += "%d\t%d\t%.6e\n" % (basket + basket_gid_offset , pyr+ self.params['pyr_offset'] , (-1.0) * w)
                    n_connections += 1
            # 4) PYR -> BASKET (excitatory; only to the closest basket cells)
            basket_gids_without_offset = self.get_closest_basket_cell_ids(mc_pyr)
            for basket in basket_gids_without_offset:
                w = self.draw_connection(self.p_pyr_basket, self.w_pyr_basket, noise=self.params["w_pyr_basket_sigma"])
                if ((w != 0) and (w > self.params['weight_threshold'])):
                    lines += "%d\t%d\t%.6e\n" % (pyr + self.params['pyr_offset'] , basket + basket_gid_offset, w)
                    n_connections += 1
        # BASKET -> BASKET (mutual inhibition within each hypercolumn)
        for hc in xrange(self.n_hc):
            for src in xrange(0, self.params['n_basket_per_hc']):
                for tgt in xrange(0, self.params['n_basket_per_hc']):
                    if (src != tgt):
                        w = self.draw_connection(self.p_basket_basket, self.w_basket_basket, noise=self.params["w_basket_basket_sigma"])
                        if ((w != 0) and (w > self.params['weight_threshold'])):
                            gid_offset = self.params['basket_offset'] + hc * self.params['n_basket_per_hc']
                            lines += "%d\t%d\t%.6e\n" % (src + gid_offset, tgt + gid_offset, (-1.0) * w)
                            n_connections += 1
        # header line: number of connections + number of columns per row
        first_line = "%d %d\n" % (n_connections, 3)
        print "Writing connections to file", self.output_fn
        self.output_f = file(self.output_fn, 'w')
        self.output_f.write(first_line)
        self.output_f.write(lines)
        self.output_f.close()
def create_orthogonal_connections(self):
my_min_pyr = 0
my_min_rsnp = 0
my_min_basket = 0
my_max_pyr = self.n_hc * self.n_mc * self.n_pyr_per_mc
my_max_rsnp = self.n_hc * self.n_mc * self.n_rsnp_per_mc
my_max_basket = self.n_hc * self.n_basket_per_hc
lines = ""
n_connections = 0 # needed for NEURON output file
# within one MC:
# 1) pyr - pyr: 25%, 1.2
# 2) rsnp - pyr: 70%, -0.8
# within one HC:
# 3) pyr - basket: 70%,, 1.9
# 4) basket - basket: 70%, -2.2
# from one HC to another:
# 5) pyr - rsnp: 30%, 0.2 if in different patterns
# 6) pyr - pyr: 30%, 0.4 if in the same patterns
# iterate over source pyramidal cells
for pyr in xrange(my_min_pyr, my_max_pyr):
# 1) - 4) within same HC
# 1 PYR -> PYR: same HC, same MC
# coordinates of source pyr
mc_pyr = (pyr / self.n_pyr_per_mc) % self.n_mc
hc_pyr = pyr / (self.n_pyr_per_mc * self.n_mc)
assert (hc_pyr< self.params['n_hc'])
# min/max ids of possible target pyr cells
min_pyr = mc_pyr * self.n_pyr_per_mc + hc_pyr * self.n_mc * self.n_pyr_per_mc
max_pyr = (mc_pyr + 1) * self.n_pyr_per_mc + hc_pyr * self.n_mc * self.n_pyr_per_mc
for tgt_pyr in xrange(min_pyr, max_pyr):
tgt_hc = tgt_pyr / (self.n_pyr_per_mc * self.n_mc)
if (pyr != tgt_pyr) and (tgt_hc == hc_pyr):
w = self.draw_connection(self.p_pyr_pyr_local, self.w_pyr_pyr_local, noise=self.params["w_pyr_pyr_local_sigma"])
if (w != 0):
lines += "%d\t%d\t%.6e\n" % (pyr + self.params['pyr_offset'], tgt_pyr + self.params['pyr_offset'], w)
n_connections += 1
# 2 RSNP -> PYR: within same minicolumn
min_rsnp = mc_pyr * self.n_rsnp_per_mc
max_rsnp = (mc_pyr + 1) * self.n_rsnp_per_mc
# for rsnp in xrange(min_rsnp, max_rsnp): # global loop for inter hypercolumnar connections
for rsnp in xrange(0, self.n_rsnp_per_mc): # global loop for inter hypercolumnar connections
# mc_rsnp = rsnp / self.n_rsnp_per_mc
# hc_rsnp = rsnp / (self.n_rsnp_per_mc * self.n_mc)
w = self.draw_connection(self.p_rsnp_pyr, self.w_rsnp_pyr, noise=self.params["w_rsnp_pyr_sigma"])
if (w != 0):
gid_offset = self.params['rsnp_offset'] + hc_pyr * self.n_mc * self.n_rsnp_per_mc + mc_pyr * self.n_rsnp_per_mc
lines += "%d\t%d\t%.6e\n" % (rsnp + gid_offset, pyr + self.params['pyr_offset'] , (-1.0) * w)
n_connections += 1
min_basket = self.n_hc * self.n_basket_per_hc
max_basket = (self.n_hc + 1) * self.n_basket_per_hc
# for basket in xrange(min_basket, max_basket):
for basket in xrange(0, self.n_basket_per_hc):
# 3) BASKET -> PYR
w = self.draw_connection(self.params['p_basket_pyr'], self.w_basket_pyr, noise=self.params["w_basket_pyr_sigma"])
gid_offset = self.params['basket_offset'] + hc_pyr * self.n_basket_per_hc
if (w != 0):
lines += "%d\t%d\t%.6e\n" % (basket + gid_offset, pyr + self.params['pyr_offset'] , (-1.0) * w)
n_connections += 1
# 4) PYR -> BASKET
basket_gids_without_offset = self.get_closest_basket_cell_ids(mc_pyr)
basket_gid_offset = self.params['basket_offset'] + hc_pyr * self.n_basket_per_hc
for basket in basket_gids_without_offset:
w = self.draw_connection(self.p_pyr_basket, | |
function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.histogram.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variable `binNumber` Anything contained
in tag `<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
:class:`plotly.graph_objects.histogram.Marker` instance
or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the bars. With "v" ("h"), the
value of the each bar spans along the vertical
(horizontal).
selected
:class:`plotly.graph_objects.histogram.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.histogram.Stream` instance
or dict with compatible properties
text
Sets hover text elements associated with each bar. If a
single string, the same string appears over all bars.
If an array of string, the items are mapped in order to
the this trace's coordinates.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.histogram.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbins
:class:`plotly.graph_objects.histogram.XBins` instance
or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybins
:class:`plotly.graph_objects.histogram.YBins` instance
or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
row : 'all', int or None (default)
Subplot row index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`.If 'all', addresses all
rows in the specified column(s).
col : 'all', int or None (default)
Subplot col index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`.If 'all', addresses all
columns in the specified row(s).
secondary_y: boolean or None (default None)
If True, associate this trace with the | |
<filename>exp_sr_test06.py
# -*- coding: utf-8 -*-
"""
TODO: Please check readme.txt file first!
--
This Python2.7 program is to reproduce Figure-5. In this test, we compare
GraphStoIHT with six baseline methods on the real image dataset, which can be
found in reference [2].
References:
[1] <NAME>, <NAME>, and <NAME>. "Linear convergence of
stochastic iterative greedy algorithms with sparse constraints."
IEEE Transactions on Information Theory 63.11 (2017): 6869-6895.
[2] Hegde, Chinmay, <NAME>, and <NAME>. "A nearly-linear time
framework for graph-structured sparsity." International Conference on
Machine Learning. 2015.
[3] Blumensath, Thomas, and <NAME>. "Iterative hard thresholding
for compressed sensing." Applied and computational harmonic analysis
27.3 (2009): 265-274.
[4] Hegde, Chinmay, <NAME>, and <NAME>. "Fast recovery from
a union of subspaces." Advances in Neural Information Processing
Systems. 2016.
[5] <NAME>. "Random walks on graphs: A survey." Combinatorics,
Paul erdos is eighty 2.1 (1993): 1-46.
[6] Needell, Deanna, and <NAME>. "CoSaMP: Iterative signal recovery
from incomplete and inaccurate samples."
Applied and computational harmonic analysis 26.3 (2009): 301-321.
[7] Blumensath, Thomas, and <NAME>. "Normalized iterative hard
thresholding: Guaranteed stability and performance." IEEE Journal
of selected topics in signal processing 4.2 (2010): 298-309.
# TODO You need to:
1. install numpy, matplotlib (optional), and networkx (optional).
2. build our sparse_module by executing ./build.sh please check our
readme.md file. If you do not know how to compile this library.
"""
import os
import time
import pickle
import multiprocessing
from itertools import product
import numpy as np
# Try to load the compiled C extension providing the head/tail projection.
# NOTE(review): if the outer ImportError triggers, execution continues with
# sparse_module undefined; only the inner failure calls exit(0).
try:
    import sparse_module
    try:
        from sparse_module import wrap_head_tail_bisearch
    except ImportError:
        print('cannot find wrap_head_tail_bisearch method in sparse_module')
        sparse_module = None
        exit(0)
except ImportError:
    print('\n'.join([
        'cannot find the module: sparse_module',
        'try run: \'python setup.py build_ext --inplace\' first! ']))
# Seed from OS entropy, then pre-draw the shared Gaussian sensing matrices
# (flattened 2500x2500 training and 100x2500 validation designs).
np.random.seed()
g_x_tr_mat = np.random.normal(0.0, 1.0, 2500 * 2500)
g_x_va_mat = np.random.normal(0.0, 1.0, 100 * 2500)
def print_helper(method, trial_i, n, err, num_epochs, run_time):
    """ Print one trial's result as a single fixed-width summary line. """
    summary = '%-13s trial_%03d n: %03d w_error: %.3e ' \
              'num_epochs: %03d run_time: %.3e' \
              % (method, trial_i, n, err, num_epochs, run_time)
    print(summary)
def get_img_data(root_p):
    """ Load the three benchmark images, resize them to 50x50 and collect
    sparsity-related meta data.

    :param root_p: folder containing the sr_image_<name>.mat files.
    :return: dict with the flattened images, grid size, sparsity levels,
             number of connected components per image and the grid graph.
    """
    import scipy.io as sio
    from PIL import Image
    img_name_list = ['background', 'angio', 'icml']
    re_height, re_width = 50, 50
    resized_data = dict()
    s_list = []
    for name in img_name_list:
        raw_img = sio.loadmat(root_p + 'sr_image_%s.mat' % name)['x_gray']
        pil_img = Image.fromarray(raw_img).resize(
            (re_height, re_width), Image.BILINEAR)
        resized = np.asarray(pil_img.getdata()).reshape((re_height, re_width))
        resized_data[name] = resized
        # sparsity = number of non-zero pixels after resizing
        s_list.append(len(np.nonzero(resized)[0]))
    img_data = {
        'img_list': img_name_list,
        'background': np.asarray(resized_data['background']).flatten(),
        'angio': np.asarray(resized_data['angio']).flatten(),
        'icml': np.asarray(resized_data['icml']).flatten(),
        'height': re_height,
        'width': re_width,
        'p': re_height * re_width,
        's': dict(zip(img_name_list, s_list)),
        's_list': s_list,
        'g_dict': {'background': 1, 'angio': 1, 'icml': 4},
        'graph': simu_grid_graph(height=re_height, width=re_width)
    }
    return img_data
def simu_grid_graph(width, height):
    """ Generate a grid graph with size, width x height. Totally there will be
        width x height number of nodes in this generated graph.
    :param width: the width of the grid graph.
    :param height: the height of the grid graph.
    :return: 1. array of edges (node index pairs, row-major node numbering)
             2. array of edge costs (all 1.0)
             On invalid (negative) input: two empty lists.
    """
    np.random.seed()  # kept for parity with the original (no randomness used here)
    # Bug fix: the original used 'and', so the error path only triggered when
    # BOTH dimensions were negative; e.g. (width=-1, height=5) slipped through.
    if width < 0 or height < 0:
        print('Error: width and height should be positive.')
        return [], []
    width, height = int(width), int(height)
    edges = []
    index = 0
    for i in range(height):
        for j in range(width):
            # right neighbour, unless we are in the last column
            if (index % width) != (width - 1):
                edges.append((index, index + 1))
            # neighbour below, unless we are in the last row
            if index + width < int(width * height):
                edges.append((index, index + width))
            index += 1
    edges = np.asarray(edges, dtype=int)
    weights = np.ones(len(edges), dtype=np.float64)
    return edges, weights
def algo_head_tail_bisearch(
        edges, x, costs, g, root, s_low, s_high, max_num_iter, verbose):
    """ Wrapper of the head/tail-projection proposed in [2].
    :param edges: edges in the graph.
    :param x: projection vector x.
    :param costs: edge costs in the graph.
    :param g: the number of connected components.
    :param root: root of subgraph. Usually, set to -1: no root.
    :param s_low: the lower bound of the sparsity.
    :param s_high: the upper bound of the sparsity.
    :param max_num_iter: the maximum number of iterations used in
           binary search procedure.
    :param verbose: print out some information.
    :return: 1. the support of the projected vector
             2. the projected vector
    """
    # node prizes are the squared entries of x
    node_prizes = x * x
    # clamp the upper sparsity bound to avoid a too-large value
    s_high = min(s_high, len(node_prizes) - 1)
    re_nodes = wrap_head_tail_bisearch(
        edges, node_prizes, costs, g, root, s_low, s_high, max_num_iter, verbose)
    support = re_nodes[0]
    projected = np.zeros_like(x)
    projected[support] = x[support]
    return support, projected
def algo_iht(x_mat, y_tr, max_epochs, lr, s, x0, tol_algo):
    """ Iterative Hard Thresholding Method proposed in reference [3]. The
        standard iterative hard thresholding method for compressive sensing.
    :param x_mat: the design matrix.
    :param y_tr: the array of measurements.
    :param max_epochs: the maximum epochs (iterations) allowed.
    :param lr: the learning rate (should be 1.0).
    :param s: the sparsity parameter.
    :param x0: x0 is the initial point.
    :param tol_algo: tolerance parameter for early stopping.
    :return: 1. the number of epochs(iterations) used,
             2. the run time.
             3. the final estimator.
    """
    t_start = time.time()
    _, p = x_mat.shape
    # precompute X^T X and X^T y once, outside the iteration loop
    x_t = np.transpose(x_mat)
    gram = np.dot(x_t, x_mat)
    corr = np.dot(x_t, y_tr)
    w_hat = x0
    epochs_used = 0
    for _ in range(max_epochs):
        epochs_used += 1
        # gradient step followed by hard thresholding to the s largest entries
        candidate = w_hat - lr * (np.dot(gram, w_hat) - corr)
        candidate[np.argsort(np.abs(candidate))[0:p - s]] = 0.
        w_hat = candidate
        # early stopping for diverge cases due to the large learning rate
        if np.linalg.norm(w_hat) >= 1e3:  # diverge cases.
            break
        if np.linalg.norm(y_tr - np.dot(x_mat, w_hat)) <= tol_algo:
            break
    return epochs_used, time.time() - t_start, w_hat
def cv_iht(x_tr_mat, y_tr, x_va_mat, y_va,
           max_epochs, lr_list, s, x_star, x0, tol_algo):
    """ Tuning parameter by using additional validation dataset.

    Runs IHT once per candidate learning rate and keeps the run with the
    smallest validation error (first winner on ties, like np.argmin).
    """
    best = None  # (validation error, num_epochs, run_time, estimator)
    for lr in lr_list:
        num_epochs, run_time, x_hat = algo_iht(
            x_mat=x_tr_mat, y_tr=y_tr, max_epochs=max_epochs,
            lr=lr, s=s, x0=x0, tol_algo=tol_algo)
        val_err = np.linalg.norm(y_va - np.dot(x_va_mat, x_hat)) ** 2.
        if best is None or val_err < best[0]:
            best = (val_err, num_epochs, run_time, x_hat)
    err = np.linalg.norm(x_star - best[3])
    return err, best[1], best[2]
def algo_sto_iht(x_mat, y_tr, max_epochs, lr, s, x0, tol_algo, b):
    """ Stochastic Iterative Hard Thresholding Method proposed in [1].
    :param x_mat: the design matrix.
    :param y_tr: the array of measurements.
    :param max_epochs: the maximum epochs (iterations) allowed.
    :param lr: the learning rate (should be 1.0).
    :param s: the sparsity parameter.
    :param x0: x0 is the initial point.
    :param tol_algo: tolerance parameter for early stopping.
    :param b: block size
    :return: 1. the number of epochs(iterations) used,
             2. the run time.
             3. the final estimator.
    """
    np.random.seed()
    start_time = time.time()
    x_hat = x0
    (n, p) = x_mat.shape
    x_tr_t = np.transpose(x_mat)
    b = n if n < b else b
    # Bug fix: use floor division. Under Python 3 the original '/' produced a
    # float, breaking both '[1. / num_blocks] * num_blocks' and range();
    # '//' is behavior-identical for Python 2 ints.
    num_blocks = int(n) // int(b)
    prob = [1. / num_blocks] * num_blocks
    num_epochs = 0
    for epoch_i in range(max_epochs):
        num_epochs += 1
        for _ in range(num_blocks):
            # pick a random block and take a gradient step restricted to it
            ii = np.random.randint(0, num_blocks)
            block = range(b * ii, b * (ii + 1))
            xtx = np.dot(x_tr_t[:, block], x_mat[block])
            xty = np.dot(x_tr_t[:, block], y_tr[block])
            gradient = - 2. * (xty - np.dot(xtx, x_hat))
            # importance-weighted step, then hard thresholding to s entries
            bt = x_hat - (lr / (prob[ii] * num_blocks)) * gradient
            bt[np.argsort(np.abs(bt))[0:p - s]] = 0.
            x_hat = bt
        # early stopping for diverge cases due to the large learning rate
        if np.linalg.norm(x_hat) >= 1e3:  # diverge cases.
            break
        if np.linalg.norm(y_tr - np.dot(x_mat, x_hat)) <= tol_algo:
            break
    run_time = time.time() - start_time
    return num_epochs, run_time, x_hat
def cv_sto_iht(x_tr_mat, y_tr, x_va_mat, y_va, max_epochs, s, x_star, x0,
               tol_algo, b_list, lr_list):
    """ Tuning parameter by using additional validation dataset.

    Runs StoIHT for every (learning rate, block size) pair and keeps the run
    with the smallest validation error (first winner on ties, like np.argmin).
    """
    best = None  # (validation error, num_epochs, run_time, estimator)
    for lr, b in product(lr_list, b_list):
        num_epochs, run_time, x_hat = algo_sto_iht(
            x_mat=x_tr_mat, y_tr=y_tr, max_epochs=max_epochs,
            lr=lr, s=s, x0=x0, tol_algo=tol_algo, b=b)
        val_err = np.linalg.norm(y_va - np.dot(x_va_mat, x_hat)) ** 2.
        if best is None or val_err < best[0]:
            best = (val_err, num_epochs, run_time, x_hat)
    err = np.linalg.norm(x_star - best[3])
    return err, best[1], best[2]
def algo_graph_iht(
x_mat, y_tr, max_epochs, lr, x0, tol_algo, edges, costs, g, s,
root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):
""" Graph Iterative Hard Thresholding proposed in [4] and projection
operator is proposed in [2].
:param x_mat: the design matrix.
:param y_tr: the array of measurements.
:param max_epochs: the maximum epochs (iterations) allowed.
:param lr: the learning | |
program name
:return: Tuple of:
1) Argparse Namespace of parsed arguments
2) Dictionary of user-specified arguments
"""
# Parse the input flags using argparse
parser = get_parser(argv[0])
parsed_args = parser.parse_args(argv[1:])
# Break down inputs to keep track of arguments and values specified directly by the user
user_args = get_user_inputs(argv, parsed_args)
# Set up the logger
log_file = parsed_args.out + ".log"
if parsed_args.quiet:
log_level = logging.WARN
elif parsed_args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
set_up_logger(log_file, log_level)
# Log header and other information
logging.info(HEADER)
logging.info("See full log at: %s\n", os.path.abspath(log_file))
logging.info("\nProgram executed via:\n%s\n", format_terminal_call(argv))
return parsed_args, user_args
#################################
# Disable pylint branch warning because we actually need all these checks
def validate_reg_options(pargs: argp.Namespace, internal_values: Dict[str, Any]): # pylint: disable=too-many-branches
    """
    Responsible for validating regression-related inputs and populating the internal map with
    values related to this processing

    :param pargs: Result of argparse parsing user command / flags
    :param internal_values: Dictionary containing internal values that might be updated
                            in this function
    :raises RuntimeError: If a user-supplied coefficient matrix does not contain
                          num_pops^2 elements
    """
    # Process regression coefficient options
    num_pops = len(pargs.sumstats)
    num_pops_sq = num_pops * num_pops

    # 1) LD coefs (precedence: explicit matrix > fixed-correlation scaling > all free)
    ld_coef_matrix = getattr(pargs, "reg_ld_coef", None)
    if ld_coef_matrix is not None:
        if len(ld_coef_matrix) != num_pops_sq:
            raise RuntimeError("Expected a matrix with %s elements for regression coefficients "
                               "(LD) but got %s." % (num_pops_sq, len(ld_coef_matrix)))
        internal_values[REG_LD_COEF_OPT] = ld_coef_matrix.reshape((num_pops, num_pops))
        internal_values[REG_LD_COEF_SCALE_COEF] = None
    elif getattr(pargs, "reg_ld_set_corr", None):
        internal_values[REG_LD_COEF_OPT] = MAMA_REG_OPT_SET_CORR
        internal_values[REG_LD_COEF_SCALE_COEF] = getattr(pargs, "reg_ld_set_corr")
    else:
        internal_values[REG_LD_COEF_OPT] = MAMA_REG_OPT_ALL_FREE
        internal_values[REG_LD_COEF_SCALE_COEF] = None
    logging.debug("Regression coefficient option (LD) = %s", internal_values[REG_LD_COEF_OPT])
    logging.debug("Regression coefficient option (LD Scale) = %s",
                  internal_values[REG_LD_COEF_SCALE_COEF])

    # 2) Intercept coefs (precedence: explicit matrix > all zero > off-diagonal zero > all free)
    int_coef_matrix = getattr(pargs, "reg_int_coef", None)
    if int_coef_matrix is not None:
        if len(int_coef_matrix) != num_pops_sq:
            raise RuntimeError("Expected a matrix with %s elements for regression coefficients "
                               "(intercept) but got %s." % (num_pops_sq, len(int_coef_matrix)))
        internal_values[REG_INT_COEF_OPT] = int_coef_matrix.reshape((num_pops, num_pops))
    elif getattr(pargs, "reg_int_zero", None):
        internal_values[REG_INT_COEF_OPT] = MAMA_REG_OPT_ALL_ZERO
    elif getattr(pargs, "reg_int_diag", None):
        internal_values[REG_INT_COEF_OPT] = MAMA_REG_OPT_OFFDIAG_ZERO
    else:
        internal_values[REG_INT_COEF_OPT] = MAMA_REG_OPT_ALL_FREE
    logging.debug("Regression coefficient option (Intercept) = %s",
                  internal_values[REG_INT_COEF_OPT])

    # 3) SE^2 coefs (precedence: explicit matrix > all zero > identity > off-diagonal zero > free)
    se2_coef_matrix = getattr(pargs, "reg_se2_coef", None)
    if se2_coef_matrix is not None:
        if len(se2_coef_matrix) != num_pops_sq:
            raise RuntimeError("Expected a matrix with %s elements for regression coefficients "
                               "(SE^2) but got %s." % (num_pops_sq, len(se2_coef_matrix)))
        internal_values[REG_SE2_COEF_OPT] = se2_coef_matrix.reshape((num_pops, num_pops))
    elif getattr(pargs, "reg_se2_zero", None):
        internal_values[REG_SE2_COEF_OPT] = MAMA_REG_OPT_ALL_ZERO
    elif getattr(pargs, "reg_se2_ident", None):
        internal_values[REG_SE2_COEF_OPT] = MAMA_REG_OPT_IDENT
    elif getattr(pargs, "reg_se2_diag", None):
        internal_values[REG_SE2_COEF_OPT] = MAMA_REG_OPT_OFFDIAG_ZERO
    else:
        internal_values[REG_SE2_COEF_OPT] = MAMA_REG_OPT_ALL_FREE
    logging.debug("Regression coefficient option (SE^2) = %s", internal_values[REG_SE2_COEF_OPT])
#################################
def construct_re_map(pargs: argp.Namespace) -> Dict[str, str]:
    """
    Builds the regular-expression map used for column matching in this run of
    the MAMA program.  Starts from the default map and applies any additions
    or replacements the user specified via argparse flags.

    :param pargs: Result of argparse parsing user command / flags

    :return: Dictionary that maps regular expressions to standard column names
             (used for column matching used by this execution of the MAMA program)
    """
    regex_map = MAMA_RE_EXPR_MAP.copy()
    for col in MAMA_REQ_STD_COLS:
        extra_re = getattr(pargs, to_arg(MAMA_RE_ADD_FLAGS[col]), None)
        override_re = getattr(pargs, to_arg(MAMA_RE_REPLACE_FLAGS[col]), None)
        # An "additional" RE is OR-ed onto the default and takes precedence
        # over a full replacement RE for the same column
        if extra_re:
            regex_map[col] = "%s|%s" % (regex_map[col], extra_re)
        elif override_re:
            regex_map[col] = override_re
    logging.debug("\nRegex map = %s", regex_map)
    return regex_map
#################################
def construct_filter_map(pargs: argp.Namespace) -> Dict[str, Tuple[Filter, str]]:
    """
    Builds the sumstats filter map used for QC of GWAS data in this run of the
    MAMA program.  Starts from the default filters and adjusts them according
    to the user's argparse inputs.

    :param pargs: Result of argparse parsing user command / flags

    :return: Dictionary that maps names of filters to the function and description of the filter
             (used for GWAS QC by this execution of the MAMA program)
    """
    filters = MAMA_STD_FILTERS.copy()

    freq_bounds = getattr(pargs, "freq_bounds", None)
    if freq_bounds:
        min_freq, max_freq = freq_bounds[0], freq_bounds[1]
        if min_freq > max_freq:
            raise RuntimeError("Minimum MAF (%s) must be less than maximum MAF (%s) " %
                               (min_freq, max_freq))
        filters[FREQ_FILTER] = (create_freq_filter(min_freq, max_freq),
                                "Filters out SNPs with FREQ values outside of [%s, %s]" %
                                (min_freq, max_freq))

    allowed_chrs = getattr(pargs, "allowed_chr_values", None)
    if allowed_chrs:
        filters[CHR_FILTER] = (create_chr_filter(allowed_chrs),
                               "Filters out SNPs with listed chromosomes not in %s" %
                               allowed_chrs)

    # If palindromic SNPs are explicitly allowed, drop the default filter for them
    if getattr(pargs, "allow_palindromic_snps", None):
        del filters[SNP_PALIN_FILT]

    logging.debug("\nFilter map = %s\n", filters)
    return filters
#################################
def construct_ss_and_col_maps(pargs: argp.Namespace, re_map: Dict[str, str])\
    -> Tuple[Dict[str, Dict[str, str]], Dict[PopulationId, str]]:
    """
    Builds two related maps from the user's sumstats inputs:
        1) the map between population ID and column map used for mapping sumstats columns
        2) the map between population ID (ancestry + phenotype) and summary stats filename

    :param pargs: Result of argparse parsing user command / flags

    :return: Tuple containing:
                 1) the map between population ID and column map used for mapping sumstats columns
                 2) the map between population ID (ancestry + phenotype) and summary stats filename
    """
    col_map = {}
    ss_map = {}
    for ss_file, ancestry, phenotype in pargs.sumstats:
        # Peek at the header row only to discover the column names
        header_cols = list(pd.read_csv(ss_file, sep=None, engine='python',
                                       nrows=1, comment="#").columns)
        pop_id = (ancestry, phenotype)
        ss_map[pop_id] = ss_file
        try:
            col_map[pop_id] = determine_column_mapping(header_cols, re_map, MAMA_REQ_STD_COLS)
        except RuntimeError as exc:
            raise RuntimeError("Column mapping error for summary statistics file %s (ancestry = "
                               "%s and phenotype = %s): %s"
                               % (ss_file, ancestry, phenotype, exc)) from exc
    return col_map, ss_map
#################################
def validate_inputs(pargs: argp.Namespace, user_args: Dict[str, Any]):
    """
    Responsible for coordinating whatever initial validation of inputs can be done

    :param pargs: Result of argparse parsing user command / flags
    :param user_args: Flags explicitly set by the user along with their values

    :return: Dictionary that contains flags and parameters needed by this program.  It contains
             user-input flags along with defaults set through argparse, and any additional flags
             added as calculations proceed
    """
    # Log user-specified arguments
    logging.debug("\nProgram was called with the following arguments:\n%s", user_args)

    # Prepare dictionary that will hold internal values for this program
    internal_values = dict()

    # Get output directory
    internal_values[OUT_DIR] = os.path.dirname(pargs.out)

    # Determine the set of ancestries once, up front.  (This is loop-invariant:
    # previously it was recomputed for every LD scores file, and was left
    # undefined at the ANCESTRIES assignment below if no LD files were given.)
    ancestries = {a for ss_file, a, p in pargs.sumstats}

    # Validate columns of the LD scores file(s)
    for ld_score_file in pargs.ld_scores:
        ld_cols = set(
            pd.read_csv(ld_score_file, sep=None, engine='python', nrows=1, comment="#").columns)

        # Each ancestry pair must have a column named for the pair in either order
        # (the pair iterator is consumed per file, so it is rebuilt each iteration)
        anc_tuples = itertools.combinations_with_replacement(ancestries, 2)
        missing_ld_pair_cols = {anc_tuple for anc_tuple in anc_tuples
                                if not("%s_%s" % anc_tuple in ld_cols or
                                       "%s_%s" % anc_tuple[::-1] in ld_cols)}
        if missing_ld_pair_cols:
            raise RuntimeError("The LD scores file %s is missing columns for the following "
                               "ancestry pairs: %s" % (ld_score_file, missing_ld_pair_cols))
        if SNP_COL not in ld_cols:
            raise RuntimeError("The LD scores file %s is missing SNP column \"%s\"" %
                               (ld_score_file, SNP_COL))

    # Construct RE map for sumstats column matching (must be done before verifying sumstats columns)
    internal_values[RE_MAP] = construct_re_map(pargs)

    # Construct maps of pop ID to sumstats file and to column mappings (validate along the way)
    internal_values[COL_MAP], internal_values[SUMSTATS_MAP] =\
        construct_ss_and_col_maps(pargs, internal_values[RE_MAP])

    # Create filter map to use for summary statistics
    internal_values[FILTER_MAP] = construct_filter_map(pargs)

    # Validate and process regression options
    validate_reg_options(pargs, internal_values)

    # If harmonized summary statistics should be written to disk, determine filename format string
    internal_values[HARM_FILENAME_FSTR] = pargs.out + "_%s_%s" + HARMONIZED_SUFFIX \
        if getattr(pargs, "out_harmonized", None) else ""

    # If regression coefficients should be written to disk, determine filename format string
    internal_values[REG_FILENAME_FSTR] = pargs.out + "_%s_" + LD_COEF_SUFFIX \
        if getattr(pargs, "out_reg_coef", None) else ""

    # Copy attributes to the internal dictionary from parsed args
    for attr in vars(pargs):
        internal_values[attr] = getattr(pargs, attr)

    # Set some extra values based on parsed arg values
    internal_values[OUT_PREFIX] = os.path.basename(pargs.out)
    internal_values[ANCESTRIES] = ancestries

    return internal_values
#################################
def main_func(argv: List[str]):
"""
Main function that should handle all the top-level processing for this program
:param argv: List of arguments passed to the program (meant to be sys.argv)
"""
# Perform argument parsing and program setup
parsed_args, user_args = setup_func(argv, get_mama_parser)
# Set Numpy error handling to shunt error messages to a logging function
np.seterr(all='call')
np.seterrcall(numpy_err_handler)
# Attempt to print package version info (pandas has a nice version info summary)
if logging.root.level <= logging.DEBUG:
logging.debug("Printing Pandas' version summary:")
with contextlib.redirect_stdout(io.StringIO()) as f:
pd.show_versions()
logging.debug("%s\n", f.getvalue())
# Execute the rest of the program, but catch and log exceptions before failing
try:
# Validate user inputs and create internal dictionary
iargs = validate_inputs(parsed_args, user_args)
# Run the MAMA pipeline
result_sumstats = mama_pipeline(iargs[SUMSTATS_MAP], iargs['ld_scores'], iargs['snp_list'],
iargs[COL_MAP], iargs[RE_MAP], iargs[FILTER_MAP],
iargs[REG_LD_COEF_OPT], iargs[REG_SE2_COEF_OPT],
iargs[REG_INT_COEF_OPT], iargs[REG_LD_COEF_SCALE_COEF],
iargs['use_standardized_units'], iargs[HARM_FILENAME_FSTR],
iargs[REG_FILENAME_FSTR], iargs['input_sep'])
# Write out the summary statistics to disk
logging.info("Writing | |
shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
name_scope(str) : The name for this class.
num_filters(int): The number of filter. It is as same as the output
image channel.
filter_size (int|tuple|None): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the Conv2d Layer. According to grouped
convolution in <NAME>'s Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D
import numpy as np
data = np.random.uniform( -1, 1, [10, 3, 32, 32] ).astype('float32')
with fluid.dygraph.guard():
conv2d = Conv2D( "conv2d", 2, 3)
data = to_variable( data )
conv = conv2d( data )
"""
def __init__(self,
name_scope,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
epsilon=1e-30,
dtype="float32"):
assert param_attr is not False, "param_attr should not be False here."
super(Conv2D, self).__init__(name_scope, dtype)
self._groups = groups
self._stride = utils.convert_to_list(stride, 2, "stride")
self._padding = utils.convert_to_list(padding, 2, "padding")
self._dilation = utils.convert_to_list(dilation, 2, "dilation")
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._epsilon = epsilon
self._dtype = dtype
# if (self._num_channels == self._groups and
# num_filters % self._num_channels == 0 and not self._use_cudnn):
# self._l_type = 'depthwise_conv2d'
# else:
# TODO(jiabin): recover the usage of depthwise_conv2d when it's
# kernel fixed https://github.com/PaddlePaddle/Paddle/issues/17275
self._l_type = "conv2d"
def _build_once(self, input):
self._num_channels = input.shape[1]
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 2, "filter_size")
filter_shape = [self._num_filters, int(num_filter_channels)
] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[
1] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
# weight_v
self._filter_param_v = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
# weight_g
norm_value = _norm(
self._filter_param_v.numpy(), dim=0) # CAUTION: hard-code
self._filter_param_g = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
norm_value)),
shape=norm_value.shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
if self._use_cudnn:
self.create_variable(
name="kCUDNNFwdAlgoCache",
persistable=True,
type=core.VarDesc.VarType.RAW)
self.create_variable(
name="kCUDNNBwdDataAlgoCache",
persistable=True,
type=core.VarDesc.VarType.RAW)
self.create_variable(
name="kCUDNNBwdFilterAlgoCache",
persistable=True,
type=core.VarDesc.VarType.RAW)
self._bias_param = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
matrix = self._helper.create_variable_for_type_inference(self._dtype)
tmp = self._helper.create_variable_for_type_inference(self._dtype)
new_shape = [
self._filter_param_v.shape[0],
reduce(lambda x, y: x * y, self._filter_param_v.shape[1:], 1),
]
self._helper.append_op(
type="reshape2",
inputs={"X": self._filter_param_v},
attrs={"shape": new_shape},
outputs={"Out": matrix,
"XShape": tmp})
m_norm = self._helper.create_variable_for_type_inference(self._dtype)
m_normalized = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="norm",
inputs={"X": matrix},
outputs={"Out": m_normalized,
"Norm": m_norm},
attrs={"axis": 1,
"epsilon": self._epsilon})
v_normalized = self._helper.create_variable_for_type_inference(
self._dtype)
tmp2 = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="reshape2",
inputs={"X": m_normalized},
attrs={"shape": self._filter_param_v.shape},
outputs={"Out": v_normalized,
"XShape": tmp2})
filter_param = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="elementwise_mul",
inputs={"X": [v_normalized],
"Y": [self._filter_param_g]},
outputs={"Out": [filter_param]},
attrs={"axis": 0}, # CAUTION: hard-code
)
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={"Input": input,
"Filter": filter_param},
outputs={"Output": pre_bias},
attrs={
"strides": self._stride,
"paddings": self._padding,
"dilations": self._dilation,
"groups": self._groups if self._groups else 1,
"use_cudnn": self._use_cudnn,
"use_mkldnn": False,
})
if self._bias_param is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="elementwise_add",
inputs={"X": [pre_bias],
"Y": [self._bias_param]},
outputs={"Out": [pre_act]},
attrs={"axis": 1})
else:
pre_act = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_act, act=self._act)
class Conv2DTranspose(dg.Layer):
"""
**Convlution2D transpose layer**
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCHW format. Where N is batch size, C is the number of channels,
H is the height of the feature, and W is the width of the feature.
Parameters(dilations, strides, paddings) are two elements. These two elements
represent height and width, respectively. The details of convolution transpose
layer, please refer to the following explanation and references
`therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma ((Vg) \\ast X + b)
Where:
* :math:`X`: Input value, a tensor with NCHW format.
* :math:`V`: Filter value, a tensor with MCHW format.
* :math:`g`: Filter value, a tensor with M format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
Args:
name_scope(str): The name of this class.
num_filters(int): The number of the filter. It is as same as the output
image channel.
output_size(int|tuple|None): The output image size. If output size is a
tuple, it must contain two integers, (image_H, image_W). None if use
filter_size, padding, and stride to calculate output_size.
if output_size and filter_size are specified at the same time, They
should follow the formula above. Default: None.
filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square. None if use output size to
calculate filter_size. Default: None.
padding(int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
stride(int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
dilation(int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups(int): The | |
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def credential_find(self, scope_id, credential_id, **kwargs): # noqa: E501
"""Get a Credential # noqa: E501
Returns the Credential specified by the \"credentialId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_find(scope_id, credential_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested Credential. (required)
:param str credential_id: The id of the requested Credential (required)
:return: Credential
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.credential_find_with_http_info(scope_id, credential_id, **kwargs) # noqa: E501
else:
(data) = self.credential_find_with_http_info(scope_id, credential_id, **kwargs) # noqa: E501
return data
def credential_find_with_http_info(self, scope_id, credential_id, **kwargs): # noqa: E501
"""Get a Credential # noqa: E501
Returns the Credential specified by the \"credentialId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_find_with_http_info(scope_id, credential_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested Credential. (required)
:param str credential_id: The id of the requested Credential (required)
:return: Credential
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'credential_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method credential_find" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `credential_find`") # noqa: E501
# verify the required parameter 'credential_id' is set
if ('credential_id' not in params or
params['credential_id'] is None):
raise ValueError("Missing the required parameter `credential_id` when calling `credential_find`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'credential_id' in params:
path_params['credentialId'] = params['credential_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/credentials/{credentialId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Credential', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def credential_query(self, scope_id, body, **kwargs): # noqa: E501
"""Queries the Credentials # noqa: E501
Queries the Credentials with the given CredentialQuery parameter returning all matching Credentials # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_query(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param CredentialQuery body: The CredentialQuery to use to filter results. (required)
:return: CredentialListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.credential_query_with_http_info(scope_id, body, **kwargs) # noqa: E501
else:
(data) = self.credential_query_with_http_info(scope_id, body, **kwargs) # noqa: E501
return data
def credential_query_with_http_info(self, scope_id, body, **kwargs): # noqa: E501
"""Queries the Credentials # noqa: E501
Queries the Credentials with the given CredentialQuery parameter returning all matching Credentials # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_query_with_http_info(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param CredentialQuery body: The CredentialQuery to use to filter results. (required)
:return: CredentialListResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method credential_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `credential_query`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `credential_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/credentials/_query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CredentialListResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def credential_simple_query(self, scope_id, limit, **kwargs): # noqa: E501
"""Gets the Credential list in the scope # noqa: E501
Returns the list of all the credentials associated to the current selected scope. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_simple_query(scope_id, limit, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param int limit: The result set limit. (required)
:param str user_id: The optional id to filter results.
:param int offset: The result set offset.
:return: CredentialListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.credential_simple_query_with_http_info(scope_id, limit, **kwargs) # noqa: E501
else:
(data) = self.credential_simple_query_with_http_info(scope_id, limit, **kwargs) # noqa: E501
return data
def credential_simple_query_with_http_info(self, scope_id, limit, **kwargs): # noqa: E501
"""Gets the Credential list in the scope # noqa: E501
Returns the list of all the credentials associated to the current selected scope. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_simple_query_with_http_info(scope_id, limit, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param int limit: The result set limit. (required)
:param str user_id: The optional id to filter results.
:param int offset: The result set offset.
:return: CredentialListResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'limit', 'user_id', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method credential_simple_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `credential_simple_query`") # noqa: E501
# verify the required parameter 'limit' is set
if ('limit' not in params or
params['limit'] is None):
raise ValueError("Missing the required parameter `limit` when calling `credential_simple_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
if 'user_id' in params:
query_params.append(('userId', params['user_id'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/credentials', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CredentialListResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def credential_unlock(self, scope_id, credential_id, **kwargs): # noqa: E501
"""Unlock a Credential # noqa: E501
| |
from firedrake import op2, assemble, dot, dx, FunctionSpace, Function, sqrt, \
TestFunction, TrialFunction, Constant, grad, inner, \
LinearVariationalProblem, LinearVariationalSolver, FacetNormal, \
ds, ds_b, ds_v, ds_t, dS_v, div, avg, jump, DirichletBC, BrokenElement, \
TensorFunctionSpace, SpatialCoordinate, VectorFunctionSpace, as_vector
from abc import ABCMeta, abstractmethod, abstractproperty
from gusto import thermodynamics
from gusto.recovery import Recoverer, Boundary_Method
import numpy as np
__all__ = ["Diagnostics", "CourantNumber", "VelocityX", "VelocityZ", "VelocityY", "Gradient",
"SphericalComponent", "MeridionalComponent", "ZonalComponent", "RadialComponent",
"RichardsonNumber", "Energy", "KineticEnergy", "ShallowWaterKineticEnergy",
"ShallowWaterPotentialEnergy", "ShallowWaterPotentialEnstrophy",
"CompressibleKineticEnergy", "ExnerPi", "Sum", "Difference", "SteadyStateError",
"Perturbation", "Theta_e", "InternalEnergy", "PotentialEnergy",
"ThermodynamicKineticEnergy", "Dewpoint", "Temperature", "Theta_d",
"RelativeHumidity", "Pressure", "Pi_Vt", "HydrostaticImbalance", "Precipitation",
"PotentialVorticity", "RelativeVorticity", "AbsoluteVorticity"]
class Diagnostics(object):
    """Registry of field names to diagnose, plus the static reductions
    (min/max/rms/l2/total) that can be applied to a field."""

    # Names of the reductions that callers may request
    available_diagnostics = ["min", "max", "rms", "l2", "total"]

    def __init__(self, *fields):
        # Stored as a list (not a set) so registration order is preserved
        self.fields = list(fields)

    def register(self, *fields):
        """Register any not-yet-known field names, preserving order.

        The seen-set is updated as names are appended so that duplicates
        within a single call (e.g. ``register("a", "a")``) are only added
        once (previously such duplicates were appended twice).
        """
        seen = set(self.fields)
        for f in fields:
            if f not in seen:
                self.fields.append(f)
                seen.add(f)

    @staticmethod
    def min(f):
        """Return the minimum of |f| over all DOFs.

        NOTE: the kernel compares fabs(b[0]), so this is the minimum of
        absolute values, not the signed minimum.
        """
        fmin = op2.Global(1, np.finfo(float).max, dtype=float)
        op2.par_loop(op2.Kernel("""
static void minify(double *a, double *b) {
    a[0] = a[0] > fabs(b[0]) ? fabs(b[0]) : a[0];
}
""", "minify"), f.dof_dset.set, fmin(op2.MIN), f.dat(op2.READ))
        return fmin.data[0]

    @staticmethod
    def max(f):
        """Return the maximum of |f| over all DOFs (same fabs caveat as min)."""
        fmax = op2.Global(1, np.finfo(float).min, dtype=float)
        op2.par_loop(op2.Kernel("""
static void maxify(double *a, double *b) {
    a[0] = a[0] < fabs(b[0]) ? fabs(b[0]) : a[0];
}
""", "maxify"), f.dof_dset.set, fmax(op2.MAX), f.dat(op2.READ))
        return fmax.data[0]

    @staticmethod
    def rms(f):
        """Return the area-weighted root-mean-square of f over its domain."""
        area = assemble(1*dx(domain=f.ufl_domain()))
        return sqrt(assemble(inner(f, f)*dx)/area)

    @staticmethod
    def l2(f):
        """Return the L2 norm of f over its domain."""
        return sqrt(assemble(inner(f, f)*dx))

    @staticmethod
    def total(f):
        """Return the integral of f over its domain for scalar f.

        For non-scalar f this (intentionally) returns None, matching the
        previous behaviour.
        """
        if len(f.ufl_shape) == 0:
            return assemble(f * dx)
        else:
            pass
class DiagnosticField(object, metaclass=ABCMeta):
    """Abstract base class for diagnostic fields.

    Subclasses provide a ``name`` (plain class attribute or property) and a
    ``compute(state)`` method; ``setup`` allocates the output field on
    first use.
    """

    def __init__(self, required_fields=()):
        """
        Args:
            required_fields: names of other fields that must exist before
                this diagnostic can be computed.
        """
        # setup() checks this flag so the output field is only created once.
        self._initialised = False
        self.required_fields = required_fields

    # Fixed: ``abc.abstractproperty`` is deprecated since Python 3.3; the
    # documented replacement is stacking @property over @abstractmethod.
    # Subclasses may still override with a plain class attribute, as e.g.
    # CourantNumber does.
    @property
    @abstractmethod
    def name(self):
        """The name of this diagnostic field"""
        pass

    def setup(self, state, space=None):
        """Create the output field on first call.

        Args:
            state: the model state supplying spaces and fields.
            space: optional FunctionSpace for the output; defaults to DG0.
        """
        if not self._initialised:
            if space is None:
                space = state.spaces("DG0", state.mesh, "DG", 0)
            self.field = state.fields(self.name, space, pickup=False)
            self._initialised = True

    @abstractmethod
    def compute(self, state):
        """ Compute the diagnostic field from the current state"""
        pass

    def __call__(self, state):
        return self.compute(state)
class CourantNumber(DiagnosticField):
    """Courant number |u|*dt/sqrt(cell area) on the DG0 space."""

    name = "CourantNumber"

    def setup(self, state):
        """Create the output field and precompute the per-cell areas."""
        if not self._initialised:
            super(CourantNumber, self).setup(state)
            V = state.spaces("DG0")
            self.area = Function(V)
            # Assembling the test function alone yields each cell's volume.
            assemble(TestFunction(V)*dx, tensor=self.area)

    def compute(self, state):
        """Project the Courant number into the diagnostic field."""
        dt = Constant(state.timestepping.dt)
        u = state.fields("u")
        return self.field.project(sqrt(dot(u, u))/sqrt(self.area)*dt)
class VelocityX(DiagnosticField):
    """First (x) component of the velocity, interpolated onto CG1."""

    name = "VelocityX"

    def setup(self, state):
        if not self._initialised:
            cg1 = state.spaces("CG1", state.mesh, "CG", 1)
            super(VelocityX, self).setup(state, space=cg1)

    def compute(self, state):
        return self.field.interpolate(state.fields("u")[0])
class VelocityZ(DiagnosticField):
    """Last (z) component of the velocity, interpolated onto CG1."""

    name = "VelocityZ"

    def setup(self, state):
        if not self._initialised:
            cg1 = state.spaces("CG1", state.mesh, "CG", 1)
            super(VelocityZ, self).setup(state, space=cg1)

    def compute(self, state):
        u = state.fields("u")
        # The final component is the vertical one in both 2D and 3D.
        return self.field.interpolate(u[u.geometric_dimension() - 1])
class VelocityY(DiagnosticField):
    """Second (y) component of the velocity, interpolated onto CG1."""

    name = "VelocityY"

    def setup(self, state):
        if not self._initialised:
            cg1 = state.spaces("CG1", state.mesh, "CG", 1)
            super(VelocityY, self).setup(state, space=cg1)

    def compute(self, state):
        return self.field.interpolate(state.fields("u")[1])
class Gradient(DiagnosticField):
    """Gradient of a named field, recovered weakly on a CG1 (tensor) space."""

    def __init__(self, name):
        super().__init__()
        self.fname = name

    @property
    def name(self):
        return self.fname+"_gradient"

    def setup(self, state):
        """Build the output space and a mass-matrix solver for grad(f)."""
        if not self._initialised:
            mesh_dim = state.mesh.geometric_dimension()
            try:
                field_dim = state.fields(self.fname).ufl_shape[0]
            except IndexError:
                # Scalar fields have an empty ufl_shape.
                field_dim = 1
            space = TensorFunctionSpace(state.mesh, "CG", 1,
                                        shape=(mesh_dim, ) * field_dim)
            super().setup(state, space=space)

            f = state.fields(self.fname)
            test = TestFunction(space)
            trial = TrialFunction(space)
            n = FacetNormal(state.mesh)
            a = inner(test, trial)*dx
            # Weak gradient via integration by parts.
            L = -inner(div(test), f)*dx
            if space.extruded:
                # Restore the surface term on the top/bottom boundaries.
                L += dot(dot(test, n), f)*(ds_t + ds_b)
            self.solver = LinearVariationalSolver(
                LinearVariationalProblem(a, L, self.field))

    def compute(self, state):
        self.solver.solve()
        return self.field
class SphericalComponent(DiagnosticField):
    """Base class for spherical components of a 3D vector field.

    ``setup`` caches the coordinates, Cartesian unit vectors, and the
    radii used by the concrete component subclasses.
    """

    def __init__(self, name):
        super().__init__()
        self.fname = name

    def setup(self, state):
        if not self._initialised:
            # Spherical decomposition requires a fully 3D geometry.
            if state.mesh.geometric_dimension() != 3:
                raise ValueError('Spherical components only work when the geometric dimension is 3!')
            super().setup(state, space=FunctionSpace(state.mesh, "CG", 1))

            V = VectorFunctionSpace(state.mesh, "CG", 1)
            self.x, self.y, self.z = SpatialCoordinate(state.mesh)
            # Cartesian unit vectors as CG1 vector functions.
            units = []
            for basis in ([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]):
                units.append(Function(V).interpolate(Constant(as_vector(basis))))
            self.x_hat, self.y_hat, self.z_hat = units
            self.R = sqrt(self.x**2 + self.y**2)  # distance from z axis
            self.r = sqrt(self.x**2 + self.y**2 + self.z**2)  # distance from origin

            self.f = state.fields(self.fname)
            if np.prod(self.f.ufl_shape) != 3:
                raise ValueError('Components can only be found of a vector function space in 3D.')
class MeridionalComponent(SphericalComponent):
    """Meridional (northward) component of the parent vector field."""

    @property
    def name(self):
        return self.fname+"_meridional"

    def compute(self, state):
        # Unit vector pointing north at (x, y, z).
        north_hat = (-self.x * self.z * self.x_hat / self.R
                     - self.y * self.z * self.y_hat / self.R
                     + self.R * self.z_hat) / self.r
        return self.field.project(dot(self.f, north_hat))
class ZonalComponent(SphericalComponent):
    """Zonal (eastward) component of the parent vector field."""

    @property
    def name(self):
        return self.fname+"_zonal"

    def compute(self, state):
        # Unit vector pointing east at (x, y, z).
        east_hat = (self.x * self.y_hat - self.y * self.x_hat) / self.R
        return self.field.project(dot(self.f, east_hat))
class RadialComponent(SphericalComponent):
    """Radial (outward) component of the parent vector field."""

    @property
    def name(self):
        return self.fname+"_radial"

    def compute(self, state):
        # Unit vector pointing away from the origin at (x, y, z).
        outward_hat = (self.x * self.x_hat + self.y * self.y_hat + self.z * self.z_hat) / self.r
        return self.field.project(dot(self.f, outward_hat))
class RichardsonNumber(DiagnosticField):
    """Richardson number built from recovered density and velocity gradients."""

    name = "RichardsonNumber"

    def __init__(self, density_field, factor=1.):
        super().__init__(required_fields=(density_field, "u_gradient"))
        self.density_field = density_field
        self.factor = Constant(factor)

    def setup(self, state):
        super().setup(state)
        # The gradients are supplied by companion Gradient diagnostics.
        self.grad_density = state.fields(self.density_field+"_gradient")
        self.gradu = state.fields("u_gradient")

    def compute(self, state):
        z_dim = state.mesh.geometric_dimension() - 1
        u_dim = state.fields("u").ufl_shape[0]
        # Sum of squared vertical shear of the horizontal components.
        denom = 0.
        for i in range(u_dim-1):
            denom += self.gradu[i, z_dim]**2
        Nsq = self.factor*self.grad_density[z_dim]
        self.field.interpolate(Nsq/denom)
        return self.field
class Energy(DiagnosticField):
    """Intermediate base class providing a kinetic-energy expression."""

    def kinetic(self, u, factor=None):
        """Return 0.5*dot(u, u), optionally weighted by *factor* (e.g. rho)."""
        if factor is None:
            return 0.5*dot(u, u)
        return 0.5*factor*dot(u, u)
class KineticEnergy(Energy):
    """Pointwise kinetic energy 0.5*|u|^2."""

    name = "KineticEnergy"

    def compute(self, state):
        return self.field.interpolate(self.kinetic(state.fields("u")))
class ShallowWaterKineticEnergy(Energy):
    """Depth-weighted kinetic energy 0.5*D*|u|^2."""

    name = "ShallowWaterKineticEnergy"

    def compute(self, state):
        return self.field.interpolate(
            self.kinetic(state.fields("u"), state.fields("D")))
class ShallowWaterPotentialEnergy(Energy):
    """Shallow-water potential energy 0.5*g*D^2."""

    name = "ShallowWaterPotentialEnergy"

    def compute(self, state):
        D = state.fields("D")
        return self.field.interpolate(0.5*state.parameters.g*D**2)
class ShallowWaterPotentialEnstrophy(DiagnosticField):
    """Shallow-water potential enstrophy from a choice of vorticity field."""

    def __init__(self, base_field_name="PotentialVorticity"):
        super().__init__()
        self.base_field_name = base_field_name

    @property
    def name(self):
        return "_from_".join(("SWPotentialEnstrophy", self.base_field_name))

    def compute(self, state):
        if self.base_field_name == "PotentialVorticity":
            # The three formulas agree if q = (zeta + f)/D.
            q = state.fields("PotentialVorticity")
            D = state.fields("D")
            enstrophy = 0.5*q**2*D
        elif self.base_field_name == "RelativeVorticity":
            zeta = state.fields("RelativeVorticity")
            D = state.fields("D")
            f = state.fields("coriolis")
            enstrophy = 0.5*(zeta + f)**2/D
        elif self.base_field_name == "AbsoluteVorticity":
            zeta_abs = state.fields("AbsoluteVorticity")
            D = state.fields("D")
            enstrophy = 0.5*(zeta_abs)**2/D
        else:
            raise ValueError("Don't know how to compute enstrophy with base_field_name=%s; base_field_name should be %s %s or %s." % (self.base_field_name, "RelativeVorticity", "AbsoluteVorticity", "PotentialVorticity"))
        return self.field.interpolate(enstrophy)
class CompressibleKineticEnergy(Energy):
    """Density-weighted kinetic energy 0.5*rho*|u|^2."""

    name = "CompressibleKineticEnergy"

    def compute(self, state):
        return self.field.interpolate(
            self.kinetic(state.fields("u"), state.fields("rho")))
class ExnerPi(DiagnosticField):
    """Exner pressure, from either the prognostic or the reference profile."""

    def __init__(self, reference=False):
        super(ExnerPi, self).__init__()
        self.reference = reference
        # Reference diagnostics read the "bar" (background) fields.
        suffix = "bar" if reference else ""
        self.rho_name = "rho" + suffix
        self.theta_name = "theta" + suffix

    @property
    def name(self):
        return "ExnerPibar" if self.reference else "ExnerPi"

    def setup(self, state):
        if not self._initialised:
            cg1 = state.spaces("CG1", state.mesh, "CG", 1)
            super(ExnerPi, self).setup(state, space=cg1)

    def compute(self, state):
        rho = state.fields(self.rho_name)
        theta = state.fields(self.theta_name)
        return self.field.interpolate(
            thermodynamics.pi(state.parameters, rho, theta))
class Sum(DiagnosticField):
    """Pointwise sum of two named fields, stored in field1's space."""

    def __init__(self, field1, field2):
        super().__init__(required_fields=(field1, field2))
        self.field1 = field1
        self.field2 = field2

    @property
    def name(self):
        return self.field1+"_plus_"+self.field2

    def setup(self, state):
        if not self._initialised:
            target_space = state.fields(self.field1).function_space()
            super(Sum, self).setup(state, space=target_space)

    def compute(self, state):
        return self.field.assign(
            state.fields(self.field1) + state.fields(self.field2))
class Difference(DiagnosticField):
    """Pointwise difference field1 - field2, stored in field1's space."""

    def __init__(self, field1, field2):
        super().__init__(required_fields=(field1, field2))
        self.field1 = field1
        self.field2 = field2

    @property
    def name(self):
        return self.field1+"_minus_"+self.field2

    def setup(self, state):
        if not self._initialised:
            target_space = state.fields(self.field1).function_space()
            super(Difference, self).setup(state, space=target_space)

    def compute(self, state):
        return self.field.assign(
            state.fields(self.field1) - state.fields(self.field2))
class SteadyStateError(Difference):
    """Difference between a field and a snapshot of its initial value."""

    def __init__(self, state, name):
        DiagnosticField.__init__(self)
        self.field1 = name
        self.field2 = name+'_init'
        # Snapshot the field now so compute() measures drift from t=0.
        initial = state.fields(name)
        snapshot = state.fields(self.field2, initial.function_space())
        snapshot.assign(initial)

    @property
    def name(self):
        return self.field1+"_error"
class Perturbation(Difference):
    """Difference between a field and its reference profile ('<name>bar')."""

    def __init__(self, name):
        self.field1 = name
        self.field2 = name+'bar'
        DiagnosticField.__init__(self, required_fields=(self.field1, self.field2))

    @property
    def name(self):
        return self.field1+"_perturbation"
class ThermodynamicDiagnostic(DiagnosticField):
    """Base class for moist-thermodynamic diagnostics.

    ``setup`` caches the prognostic fields and builds the shared UFL
    expressions (Exner pressure, temperature, pressure, liquid/total
    mixing ratios); ``compute`` refreshes the recovered density field.
    """
    name = "thermodynamic_diagnostic"

    def setup(self, state):
        if not self._initialised:
            # The output lives in the same space as theta.
            space = state.fields("theta").function_space()
            broken_space = FunctionSpace(state.mesh, BrokenElement(space.ufl_element()))
            # Lowest-order discretisations use the special physics boundary
            # recovery; higher orders need none.
            boundary_method = Boundary_Method.physics if (state.vertical_degree == 0 and state.horizontal_degree == 0) else None
            super().setup(state, space=space)
            # now let's attach all of our fields
            self.u = state.fields("u")
            self.rho = state.fields("rho")
            self.theta = state.fields("theta")
            # rho recovered (averaged) into theta's space via the Recoverer.
            self.rho_averaged = Function(space)
            self.recoverer = Recoverer(self.rho, self.rho_averaged, VDG=broken_space, boundary_method=boundary_method)
            # Moisture fields are optional: fall back to zero when absent.
            # NOTE(review): relies on state.fields raising NotImplementedError
            # for unknown names -- confirm against the fields container API.
            try:
                self.r_v = state.fields("water_v")
            except NotImplementedError:
                self.r_v = Constant(0.0)
            try:
                self.r_c = state.fields("water_c")
            except NotImplementedError:
                self.r_c = Constant(0.0)
            try:
                self.rain = state.fields("rain")
            except NotImplementedError:
                self.rain = Constant(0.0)
            # now let's store the most common expressions
            self.pi = thermodynamics.pi(state.parameters, self.rho_averaged, self.theta)
            self.T = thermodynamics.T(state.parameters, self.theta, self.pi, r_v=self.r_v)
            self.p = thermodynamics.p(state.parameters, self.pi)
            self.r_l = self.r_c + self.rain  # liquid water (cloud + rain)
            self.r_t = self.r_v + self.r_c + self.rain  # total water

    def compute(self, state):
        # Refresh rho_averaged from the current rho; subclasses call this
        # first and then assign their expression to self.field.
        self.recoverer.project()
class Theta_e(ThermodynamicDiagnostic):
    """Wet-equivalent potential temperature diagnostic."""

    name = "Theta_e"

    def compute(self, state):
        super().compute(state)
        theta_e = thermodynamics.theta_e(state.parameters, self.T, self.p, self.r_v, self.r_t)
        return self.field.assign(theta_e)
class InternalEnergy(ThermodynamicDiagnostic):
    """Moist internal energy diagnostic."""

    name = "InternalEnergy"

    def compute(self, state):
        super().compute(state)
        energy = thermodynamics.internal_energy(state.parameters, self.rho_averaged, self.T, r_v=self.r_v, r_l=self.r_l)
        return self.field.assign(energy)
class PotentialEnergy(ThermodynamicDiagnostic):
| |
N.cg_prog,
('CGINCLUDE', N.cg_prog_body, 'ENDCG', )),
Production("fall_back_cmd -> 'FallBack' String",
'p117',
N.fall_back_cmd,
('FallBack', T.String, )),
Production("fall_back_cmd -> 'FallBack' 'Off'",
'p118',
N.fall_back_cmd,
('FallBack', 'Off', )),
Production("custom_editor_cmd -> 'CustomEditor' String",
'p119',
N.custom_editor_cmd,
('CustomEditor', T.String, )),
Production("dependency_cmd -> 'Dependency' String = String",
'p120',
N.dependency_cmd,
('Dependency', T.String, T.Assign, T.String, )),
Production("id_list -> ID",
'p121',
N.id_list,
(T.ID, )),
Production("id_list -> ID id_list",
'p122',
N.id_list,
(T.ID, N.id_list, )),
Production("cg_prog_body -> cg_stms",
'p123',
N.cg_prog_body,
(N.cg_stms, )),
Production("cg_stms -> cg_stm cg_stms",
'p124',
N.cg_stms,
(N.cg_stm, N.cg_stms, )),
Production("cg_stms -> ",
'p125',
N.cg_stms,
()),
Production("cg_stm -> preprocessing_stm",
'p126',
N.cg_stm,
(N.preprocessing_stm, )),
Production("cg_stm -> function_definition",
'p127',
N.cg_stm,
(N.function_definition, )),
Production("cg_stm -> dec",
'p128',
N.cg_stm,
(N.dec, )),
Production("cg_stm -> 'CBUFFER_START' ( ID ) dec_list 'CBUFFER_END'",
'p129',
N.cg_stm,
('CBUFFER_START', T.LParen, T.ID, T.RParen, N.dec_list, 'CBUFFER_END', )),
Production("function_definition -> dec_specifier declarator compound_stm",
'p130',
N.function_definition,
(N.dec_specifier, N.declarator, N.compound_stm, )),
Production("function_definition -> dec_specifier declarator : ID compound_stm",
'p131',
N.function_definition,
(N.dec_specifier, N.declarator, T.Colon, T.ID, N.compound_stm, )),
Production("function_definition -> [ ID ( Number ) ] dec_specifier declarator compound_stm",
'p132',
N.function_definition,
(T.LBrack, T.ID, T.LParen, T.Number, T.RParen, T.RBrack, N.dec_specifier, N.declarator, N.compound_stm, )),
Production("function_definition -> [ ID ( Number ) ] dec_specifier declarator : ID compound_stm",
'p133',
N.function_definition,
(T.LBrack, T.ID, T.LParen, T.Number, T.RParen, T.RBrack, N.dec_specifier, N.declarator, T.Colon, T.ID, N.compound_stm, )),
Production("preprocessing_stm -> pp_if_stm",
'p134',
N.preprocessing_stm,
(N.pp_if_stm, )),
Production("preprocessing_stm -> pp_cmd",
'p135',
N.preprocessing_stm,
(N.pp_cmd, )),
Production("preprocessing_stm -> marco_unfold",
'p136',
N.preprocessing_stm,
(N.marco_unfold, )),
Production("pp_if_stm -> # 'if' PPTokens",
'p137',
N.pp_if_stm,
(T.Pound, 'if', T.PPTokens, )),
Production("pp_if_stm -> # 'ifdef' ID",
'p138',
N.pp_if_stm,
(T.Pound, 'ifdef', T.ID, )),
Production("pp_if_stm -> # 'ifndef' ID",
'p139',
N.pp_if_stm,
(T.Pound, 'ifndef', T.ID, )),
Production("pp_if_stm -> # 'elif' PPTokens",
'p140',
N.pp_if_stm,
(T.Pound, 'elif', T.PPTokens, )),
Production("pp_if_stm -> # 'else'",
'p141',
N.pp_if_stm,
(T.Pound, 'else', )),
Production("pp_if_stm -> # 'endif'",
'p142',
N.pp_if_stm,
(T.Pound, 'endif', )),
Production("pp_cmd -> # 'include' String",
'p143',
N.pp_cmd,
(T.Pound, 'include', T.String, )),
Production("pp_cmd -> # 'pragma' PPTokens",
'p144',
N.pp_cmd,
(T.Pound, 'pragma', T.PPTokens, )),
Production("pp_cmd -> # 'define' PPTokens",
'p145',
N.pp_cmd,
(T.Pound, 'define', T.PPTokens, )),
Production("marco_unfold -> exp ;",
'p146',
N.marco_unfold,
(N.exp, T.Semicolon, )),
Production("dec_list -> dec",
'p147',
N.dec_list,
(N.dec, )),
Production("dec_list -> dec_list dec",
'p148',
N.dec_list,
(N.dec_list, N.dec, )),
Production("primary_exp -> ID",
'p149',
N.primary_exp,
(T.ID, )),
Production("primary_exp -> String",
'p150',
N.primary_exp,
(T.String, )),
Production("primary_exp -> Number",
'p151',
N.primary_exp,
(T.Number, )),
Production("primary_exp -> ( exp )",
'p152',
N.primary_exp,
(T.LParen, N.exp, T.RParen, )),
Production("postfix_exp -> primary_exp",
'p153',
N.postfix_exp,
(N.primary_exp, )),
Production("postfix_exp -> postfix_exp [ exp ]",
'p154',
N.postfix_exp,
(N.postfix_exp, T.LBrack, N.exp, T.RBrack, )),
Production("postfix_exp -> postfix_exp ( )",
'p155',
N.postfix_exp,
(N.postfix_exp, T.LParen, T.RParen, )),
Production("postfix_exp -> postfix_exp ( argument_exp_list )",
'p156',
N.postfix_exp,
(N.postfix_exp, T.LParen, N.argument_exp_list, T.RParen, )),
Production("postfix_exp -> buildin_type_name ( argument_exp_list )",
'p157',
N.postfix_exp,
(N.buildin_type_name, T.LParen, N.argument_exp_list, T.RParen, )),
Production("postfix_exp -> postfix_exp . ID",
'p158',
N.postfix_exp,
(N.postfix_exp, T.Dot, T.ID, )),
Production("postfix_exp -> postfix_exp ++",
'p159',
N.postfix_exp,
(N.postfix_exp, T.Increment, )),
Production("postfix_exp -> postfix_exp --",
'p160',
N.postfix_exp,
(N.postfix_exp, T.Decrement, )),
Production("argument_exp_list -> assignment_exp",
'p161',
N.argument_exp_list,
(N.assignment_exp, )),
Production("argument_exp_list -> argument_exp_list , assignment_exp",
'p162',
N.argument_exp_list,
(N.argument_exp_list, T.Comma, N.assignment_exp, )),
Production("unary_exp -> postfix_exp",
'p163',
N.unary_exp,
(N.postfix_exp, )),
Production("unary_exp -> ++ unary_exp",
'p164',
N.unary_exp,
(T.Increment, N.unary_exp, )),
Production("unary_exp -> -- unary_exp",
'p165',
N.unary_exp,
(T.Decrement, N.unary_exp, )),
Production("unary_exp -> unary_op unary_exp",
'p166',
N.unary_exp,
(N.unary_op, N.unary_exp, )),
Production("unary_op -> +",
'p167',
N.unary_op,
(T.Plus, )),
Production("unary_op -> -",
'p168',
N.unary_op,
(T.Minus, )),
Production("unary_op -> !",
'p169',
N.unary_op,
(T.NOT, )),
Production("unary_op -> ~",
'p170',
N.unary_op,
(T.Tilde, )),
Production("cast_exp -> unary_exp",
'p171',
N.cast_exp,
(N.unary_exp, )),
Production("cast_exp -> ( buildin_type_name ) cast_exp",
'p172',
N.cast_exp,
(T.LParen, N.buildin_type_name, T.RParen, N.cast_exp, )),
Production("binary_exp -> cast_exp",
'p173',
N.binary_exp,
(N.cast_exp, )),
Production("binary_exp -> binary_exp binary_op unary_exp",
'p174',
N.binary_exp,
(N.binary_exp, N.binary_op, N.unary_exp, )),
Production("binary_op -> *",
'p175',
N.binary_op,
(T.Times, )),
Production("binary_op -> /",
'p176',
N.binary_op,
(T.Divide, )),
Production("binary_op -> %",
'p177',
N.binary_op,
(T.Percent, )),
Production("binary_op -> +",
'p178',
N.binary_op,
(T.Plus, )),
Production("binary_op -> -",
'p179',
N.binary_op,
(T.Minus, )),
Production("binary_op -> <<",
'p180',
N.binary_op,
(T.LeftShift, )),
Production("binary_op -> >>",
'p181',
N.binary_op,
(T.RightShift, )),
Production("binary_op -> <",
'p182',
N.binary_op,
(T.LT, )),
Production("binary_op -> >",
'p183',
N.binary_op,
(T.GT, )),
Production("binary_op -> <=",
'p184',
N.binary_op,
(T.LE, )),
Production("binary_op -> >=",
'p185',
N.binary_op,
(T.GE, )),
Production("binary_op -> ==",
'p186',
N.binary_op,
(T.EQ, )),
Production("binary_op -> !=",
'p187',
N.binary_op,
(T.NEQ, )),
Production("binary_op -> &",
'p188',
N.binary_op,
(T.Ampersand, )),
Production("binary_op -> ^",
'p189',
N.binary_op,
(T.Caret, )),
Production("binary_op -> |",
'p190',
N.binary_op,
(T.VerticalBar, )),
Production("binary_op -> &&",
'p191',
N.binary_op,
(T.AND, )),
Production("binary_op -> ||",
'p192',
N.binary_op,
(T.OR, )),
Production("conditional_exp -> binary_exp",
'p193',
N.conditional_exp,
(N.binary_exp, )),
Production("conditional_exp -> binary_exp ? exp : conditional_exp",
'p194',
N.conditional_exp,
(N.binary_exp, T.Question, N.exp, T.Colon, N.conditional_exp, )),
Production("assignment_exp -> conditional_exp",
'p195',
N.assignment_exp,
(N.conditional_exp, )),
Production("assignment_exp -> unary_exp assignment_op assignment_exp",
'p196',
N.assignment_exp,
(N.unary_exp, N.assignment_op, N.assignment_exp, )),
Production("assignment_op -> =",
'p197',
N.assignment_op,
(T.Assign, )),
Production("assignment_op -> *=",
'p198',
N.assignment_op,
(T.AddAssign, )),
Production("assignment_op -> /=",
'p199',
N.assignment_op,
(T.SubAssign, )),
Production("assignment_op -> %=",
'p200',
N.assignment_op,
(T.MulAssign, )),
Production("assignment_op -> +=",
'p201',
N.assignment_op,
(T.DivAssign, )),
Production("assignment_op -> -=",
'p202',
N.assignment_op,
(T.ModAssign, )),
Production("assignment_op -> <<=",
'p203',
N.assignment_op,
(T.LeftShiftAssign, )),
Production("assignment_op -> >>=",
'p204',
N.assignment_op,
(T.RightShiftAssign, )),
Production("assignment_op -> &=",
'p205',
N.assignment_op,
(T.AndAssign, )),
Production("assignment_op -> ^=",
'p206',
N.assignment_op,
(T.XorAssign, )),
Production("assignment_op -> |=",
'p207',
N.assignment_op,
(T.OrAssign, )),
Production("exp -> assignment_exp",
'p208',
N.exp,
(N.assignment_exp, )),
Production("exp -> exp , assignment_exp",
'p209',
N.exp,
(N.exp, T.Comma, N.assignment_exp, )),
Production("dec -> struct_specifier ;",
'p210',
N.dec,
(N.struct_specifier, T.Semicolon, )),
Production("dec -> dec_specifier init_dec_list ;",
'p211',
N.dec,
(N.dec_specifier, N.init_dec_list, T.Semicolon, )),
Production("dec_specifier -> type_specifier",
'p212',
N.dec_specifier,
(N.type_specifier, )),
Production("dec_specifier -> type_qualifier dec_specifier",
'p213',
N.dec_specifier,
(N.type_qualifier, N.dec_specifier, )),
Production("dec_specifier -> storage_class_specifier dec_specifier",
'p214',
N.dec_specifier,
(N.storage_class_specifier, N.dec_specifier, )),
Production("type_specifier -> buildin_type_name",
'p215',
N.type_specifier,
(N.buildin_type_name, )),
Production("type_specifier -> typedef_name",
'p216',
N.type_specifier,
(N.typedef_name, )),
Production("buildin_type_name -> 'void'",
'p217',
N.buildin_type_name,
('void', )),
Production("buildin_type_name -> 'char'",
'p218',
N.buildin_type_name,
('char', )),
Production("buildin_type_name -> 'short'",
'p219',
N.buildin_type_name,
('short', )),
Production("buildin_type_name -> 'int'",
'p220',
N.buildin_type_name,
('int', )),
Production("buildin_type_name -> 'long'",
'p221',
N.buildin_type_name,
('long', )),
Production("buildin_type_name -> 'fixed'",
'p222',
N.buildin_type_name,
('fixed', )),
Production("buildin_type_name -> 'half'",
'p223',
N.buildin_type_name,
('half', )),
Production("buildin_type_name -> 'float'",
'p224',
N.buildin_type_name,
('float', )),
Production("buildin_type_name -> 'double'",
'p225',
N.buildin_type_name,
('double', )),
Production("buildin_type_name -> 'sampler2D'",
'p226',
N.buildin_type_name,
('sampler2D', )),
Production("buildin_type_name -> 'float2'",
'p227',
N.buildin_type_name,
('float2', )),
Production("buildin_type_name -> 'float3'",
'p228',
N.buildin_type_name,
('float3', )),
Production("buildin_type_name -> 'float4'",
'p229',
N.buildin_type_name,
('float4', )),
Production("buildin_type_name -> 'half2'",
'p230',
N.buildin_type_name,
('half2', )),
Production("buildin_type_name -> 'half3'",
'p231',
N.buildin_type_name,
('half3', )),
Production("buildin_type_name -> 'half4'",
'p232',
N.buildin_type_name,
('half4', )),
Production("buildin_type_name -> 'fixed2'",
'p233',
N.buildin_type_name,
('fixed2', )),
Production("buildin_type_name -> 'fixed3'",
'p234',
N.buildin_type_name,
('fixed3', )),
Production("buildin_type_name -> 'fixed4'",
'p235',
N.buildin_type_name,
('fixed4', )),
Production("buildin_type_name -> 'float3x3'",
'p236',
N.buildin_type_name,
('float3x3', )),
Production("type_qualifier -> 'uniform'",
'p237',
N.type_qualifier,
('uniform', )),
Production("type_qualifier -> 'inline'",
'p238',
N.type_qualifier,
('inline', )),
Production("type_qualifier -> 'const'",
'p239',
N.type_qualifier,
('const', )),
Production("storage_class_specifier -> 'static'",
'p240',
N.storage_class_specifier,
('static', )),
Production("typedef_name -> ID",
'p241',
N.typedef_name,
(T.ID, )),
Production("struct_specifier -> 'struct' ID",
'p242',
N.struct_specifier,
('struct', T.ID, )),
Production("struct_specifier -> 'struct' ID { struct_dec_list }",
'p243',
N.struct_specifier,
('struct', T.ID, T.LBrace, N.struct_dec_list, T.RBrace, )),
Production("struct_dec_list -> struct_dec",
'p244',
N.struct_dec_list,
(N.struct_dec, )),
Production("struct_dec_list -> struct_dec_list struct_dec",
'p245',
N.struct_dec_list,
(N.struct_dec_list, N.struct_dec, )),
Production("struct_dec -> type_specifier struct_declarator_list ;",
'p246',
N.struct_dec,
(N.type_specifier, N.struct_declarator_list, T.Semicolon, )),
Production("struct_dec -> ID ;",
'p247',
N.struct_dec,
(T.ID, T.Semicolon, )),
Production("struct_dec -> ID ( Number )",
'p248',
N.struct_dec,
(T.ID, T.LParen, T.Number, T.RParen, )),
Production("struct_dec -> ID ( Number , Number )",
'p249',
N.struct_dec,
(T.ID, T.LParen, T.Number, T.Comma, T.Number, T.RParen, )),
Production("struct_dec -> pp_if_stm",
'p250',
N.struct_dec,
(N.pp_if_stm, )),
Production("struct_dec -> 'INTERNAL_DATA'",
'p251',
N.struct_dec,
('INTERNAL_DATA', )),
Production("struct_dec -> 'UNITY_VERTEX_INPUT_INSTANCE_ID'",
'p252',
N.struct_dec,
('UNITY_VERTEX_INPUT_INSTANCE_ID', )),
Production("struct_dec -> 'UNITY_VERTEX_OUTPUT_STEREO'",
'p253',
N.struct_dec,
('UNITY_VERTEX_OUTPUT_STEREO', )),
Production("struct_declarator_list -> struct_declarator",
'p254',
N.struct_declarator_list,
(N.struct_declarator, )),
Production("struct_declarator_list -> struct_declarator_list , struct_declarator",
'p255',
N.struct_declarator_list,
(N.struct_declarator_list, T.Comma, N.struct_declarator, )),
Production("struct_declarator -> declarator",
'p256',
N.struct_declarator,
(N.declarator, )),
Production("struct_declarator -> declarator : ID",
'p257',
N.struct_declarator,
(N.declarator, T.Colon, T.ID, )),
Production("declarator -> ID",
'p258',
N.declarator,
(T.ID, )),
Production("declarator -> declarator [ exp ]",
'p259',
N.declarator,
(N.declarator, T.LBrack, N.exp, T.RBrack, )),
Production("declarator -> declarator ( )",
'p260',
N.declarator,
(N.declarator, T.LParen, T.RParen, )),
Production("declarator -> declarator ( parameter_list )",
'p261',
N.declarator,
(N.declarator, T.LParen, N.parameter_list, T.RParen, )),
Production("parameter_list -> parameter_dec",
'p262',
N.parameter_list,
(N.parameter_dec, )),
Production("parameter_list -> parameter_list , parameter_dec",
'p263',
| |
GetMotorTorques(self):
"""Get the amount of torque the motors are exerting.
This function mimicks the noisy sensor reading and adds latency.
Returns:
Motor torques of all eight motors polluted by noise and latency.
"""
return self._AddSensorNoise(
np.array(self._control_observation[2 * self.num_motors:3 *
self.num_motors]),
self._observation_noise_stdev[2])
def GetEnergyConsumptionPerControlStep(self):
"""Get the amount of energy used in last one time step.
Returns:
Energy Consumption based on motor velocities and torques (Nm^2/s).
"""
return np.abs(np.dot(
self.GetMotorTorques(),
self.GetMotorVelocities())) * self.time_step * self._action_repeat
def GetTrueBaseOrientation(self):
"""Get the orientation of minitaur's base, represented as quaternion.
Returns:
The orientation of minitaur's base.
"""
return self._base_orientation
def GetBaseOrientation(self):
"""Get the orientation of minitaur's base, represented as quaternion.
This function mimicks the noisy sensor reading and adds latency.
Returns:
The orientation of minitaur's base polluted by noise and latency.
"""
return self._pybullet_client.getQuaternionFromEuler(
self.GetBaseRollPitchYaw())
def GetTrueBaseRollPitchYawRate(self):
"""Get the rate of orientation change of the minitaur's base in euler angle.
Returns:
rate of (roll, pitch, yaw) change of the minitaur's base.
"""
angular_velocity = self._pybullet_client.getBaseVelocity(self.quadruped)[1]
orientation = self.GetTrueBaseOrientation()
return self.TransformAngularVelocityToLocalFrame(angular_velocity,
orientation)
def TransformAngularVelocityToLocalFrame(self, angular_velocity,
orientation):
"""Transform the angular velocity from world frame to robot's frame.
Args:
angular_velocity: Angular velocity of the robot in world frame.
orientation: Orientation of the robot represented as a quaternion.
Returns:
angular velocity of based on the given orientation.
"""
# Treat angular velocity as a position vector, then transform based on the
# orientation given by dividing (or multiplying with inverse).
# Get inverse quaternion assuming the vector is at 0,0,0 origin.
_, orientation_inversed = self._pybullet_client.invertTransform(
[0, 0, 0], orientation)
# Transform the angular_velocity at neutral orientation using a neutral
# translation and reverse of the given orientation.
relative_velocity, _ = self._pybullet_client.multiplyTransforms(
[0, 0, 0], orientation_inversed, angular_velocity,
self._pybullet_client.getQuaternionFromEuler([0, 0, 0]))
return np.asarray(relative_velocity)
def GetBaseRollPitchYawRate(self):
"""Get the rate of orientation change of the minitaur's base in euler angle.
This function mimicks the noisy sensor reading and adds latency.
Returns:
rate of (roll, pitch, yaw) change of the minitaur's base polluted by noise
and latency.
"""
return self._AddSensorNoise(
np.array(self._control_observation[3 * self.num_motors +
4:3 * self.num_motors + 7]),
self._observation_noise_stdev[4])
def GetActionDimension(self):
"""Get the length of the action list.
Returns:
The length of the action list.
"""
return self.num_motors
def _ApplyOverheatProtection(self, actual_torque):
if self._motor_overheat_protection:
for i in range(self.num_motors):
if abs(actual_torque[i]) > OVERHEAT_SHUTDOWN_TORQUE:
self._overheat_counter[i] += 1
else:
self._overheat_counter[i] = 0
if (self._overheat_counter[i] >
OVERHEAT_SHUTDOWN_TIME / self.time_step):
self._motor_enabled_list[i] = False
def ApplyAction(self, motor_commands, motor_control_mode=None):
"""Apply the motor commands using the motor model.
Args:
motor_commands: np.array. Can be motor angles, torques, hybrid commands,
or motor pwms (for Minitaur only).
motor_control_mode: A MotorControlMode enum.
"""
self.last_action_time = self._state_action_counter * self.time_step
control_mode = motor_control_mode
if control_mode is None:
control_mode = self._motor_control_mode
motor_commands = np.asarray(motor_commands)
q, qdot = self._GetPDObservation()
qdot_true = self.GetTrueMotorVelocities()
actual_torque, observed_torque = self._motor_model.convert_to_torque(
motor_commands, q, qdot, qdot_true, control_mode)
# May turn off the motor
self._ApplyOverheatProtection(actual_torque)
# The torque is already in the observation space because we use
# GetMotorAngles and GetMotorVelocities.
self._observed_motor_torques = observed_torque
# Transform into the motor space when applying the torque.
self._applied_motor_torque = np.multiply(actual_torque,
self._motor_direction)
motor_ids = []
motor_torques = []
for motor_id, motor_torque, motor_enabled in zip(
self._motor_id_list, self._applied_motor_torque,
self._motor_enabled_list):
if motor_enabled:
motor_ids.append(motor_id)
motor_torques.append(motor_torque)
else:
motor_ids.append(motor_id)
motor_torques.append(0)
self._SetMotorTorqueByIds(motor_ids, motor_torques)
def ConvertFromLegModel(self, actions):
"""Convert the actions that use leg model to the real motor actions.
Args:
actions: The theta, phi of the leg model.
Returns:
The eight desired motor angles that can be used in ApplyActions().
"""
motor_angle = copy.deepcopy(actions)
scale_for_singularity = 1
offset_for_singularity = 1.5
half_num_motors = self.num_motors // 2
quater_pi = math.pi / 4
for i in range(self.num_motors):
action_idx = i // 2
forward_backward_component = (
-scale_for_singularity * quater_pi *
(actions[action_idx + half_num_motors] + offset_for_singularity))
extension_component = (-1)**i * quater_pi * actions[action_idx]
if i >= half_num_motors:
extension_component = -extension_component
motor_angle[i] = (math.pi + forward_backward_component +
extension_component)
return motor_angle
def GetBaseMassesFromURDF(self):
"""Get the mass of the base from the URDF file."""
return self._base_mass_urdf
def GetBaseInertiasFromURDF(self):
"""Get the inertia of the base from the URDF file."""
return self._base_inertia_urdf
def GetLegMassesFromURDF(self):
"""Get the mass of the legs from the URDF file."""
return self._leg_masses_urdf
def GetLegInertiasFromURDF(self):
"""Get the inertia of the legs from the URDF file."""
return self._leg_inertia_urdf
def SetBaseMasses(self, base_mass):
"""Set the mass of minitaur's base.
Args:
base_mass: A list of masses of each body link in CHASIS_LINK_IDS. The
length of this list should be the same as the length of CHASIS_LINK_IDS.
Raises:
ValueError: It is raised when the length of base_mass is not the same as
the length of self._chassis_link_ids.
"""
if len(base_mass) != len(self._chassis_link_ids):
raise ValueError(
"The length of base_mass {} and self._chassis_link_ids {} are not "
"the same.".format(len(base_mass), len(self._chassis_link_ids)))
for chassis_id, chassis_mass in zip(self._chassis_link_ids, base_mass):
self._pybullet_client.changeDynamics(self.quadruped,
chassis_id,
mass=chassis_mass)
def SetLegMasses(self, leg_masses):
"""Set the mass of the legs.
A leg includes leg_link and motor. 4 legs contain 16 links (4 links each)
and 8 motors. First 16 numbers correspond to link masses, last 8 correspond
to motor masses (24 total).
Args:
leg_masses: The leg and motor masses for all the leg links and motors.
Raises:
ValueError: It is raised when the length of masses is not equal to number
of links + motors.
"""
if len(leg_masses) != len(self._leg_link_ids) + len(self._motor_link_ids):
raise ValueError("The number of values passed to SetLegMasses are "
"different than number of leg links and motors.")
for leg_id, leg_mass in zip(self._leg_link_ids, leg_masses):
self._pybullet_client.changeDynamics(self.quadruped,
leg_id,
mass=leg_mass)
motor_masses = leg_masses[len(self._leg_link_ids):]
for link_id, motor_mass in zip(self._motor_link_ids, motor_masses):
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
mass=motor_mass)
def SetBaseInertias(self, base_inertias):
"""Set the inertias of minitaur's base.
Args:
base_inertias: A list of inertias of each body link in CHASIS_LINK_IDS.
The length of this list should be the same as the length of
CHASIS_LINK_IDS.
Raises:
ValueError: It is raised when the length of base_inertias is not the same
as the length of self._chassis_link_ids and base_inertias contains
negative values.
"""
if len(base_inertias) != len(self._chassis_link_ids):
raise ValueError(
"The length of base_inertias {} and self._chassis_link_ids {} are "
"not the same.".format(len(base_inertias),
len(self._chassis_link_ids)))
for chassis_id, chassis_inertia in zip(self._chassis_link_ids,
base_inertias):
for inertia_value in chassis_inertia:
if (np.asarray(inertia_value) < 0).any():
raise ValueError("Values in inertia matrix should be non-negative.")
self._pybullet_client.changeDynamics(
self.quadruped, chassis_id, localInertiaDiagonal=chassis_inertia)
def SetLegInertias(self, leg_inertias):
"""Set the inertias of the legs.
A leg includes leg_link and motor. 4 legs contain 16 links (4 links each)
and 8 motors. First 16 numbers correspond to link inertia, last 8 correspond
to motor inertia (24 total).
Args:
leg_inertias: The leg and motor inertias for all the leg links and motors.
Raises:
ValueError: It is raised when the length of inertias is not equal to
the number of links + motors or leg_inertias contains negative values.
"""
if len(leg_inertias) != len(self._leg_link_ids) + len(
self._motor_link_ids):
raise ValueError("The number of values passed to SetLegMasses are "
"different than number of leg links and motors.")
for leg_id, leg_inertia in zip(self._leg_link_ids, leg_inertias):
for inertia_value in leg_inertias:
if (np.asarray(inertia_value) < 0).any():
raise ValueError("Values in inertia matrix should be non-negative.")
self._pybullet_client.changeDynamics(self.quadruped,
leg_id,
localInertiaDiagonal=leg_inertia)
motor_inertias = leg_inertias[len(self._leg_link_ids):]
for link_id, motor_inertia in zip(self._motor_link_ids, motor_inertias):
for inertia_value in motor_inertias:
if (np.asarray(inertia_value) < 0).any():
raise ValueError("Values in inertia matrix should be non-negative.")
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
localInertiaDiagonal=motor_inertia)
def SetFootFriction(self, foot_friction):
"""Set the lateral friction of the feet.
Args:
foot_friction: The lateral friction coefficient of the foot. This value is
shared by all four feet.
"""
for link_id in self._foot_link_ids:
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
lateralFriction=foot_friction)
def SetFootRestitution(self, foot_restitution):
"""Set the coefficient of restitution at the feet.
Args:
foot_restitution: The coefficient of restitution (bounciness) of the feet.
This value is shared by all four feet.
"""
for link_id in self._foot_link_ids:
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
restitution=foot_restitution)
  def SetJointFriction(self, joint_frictions):
    """Set the friction force at the knee joints.

    Emulates joint friction by commanding zero velocity with a bounded force
    at each joint in self._foot_link_ids (used here as knee joint indices,
    consistent with GetNumKneeJoints).

    Args:
      joint_frictions: Iterable of friction forces, one per knee joint; extra
        entries on either side are silently ignored by zip.
    """
    for knee_joint_id, friction in zip(self._foot_link_ids, joint_frictions):
      self._pybullet_client.setJointMotorControl2(
          bodyIndex=self.quadruped,
          jointIndex=knee_joint_id,
          controlMode=self._pybullet_client.VELOCITY_CONTROL,
          targetVelocity=0,
          force=friction)
  def GetNumKneeJoints(self):
    """Return the number of knee joints (same as the number of foot links)."""
    return len(self._foot_link_ids)
  def SetBatteryVoltage(self, voltage):
    """Forward the battery voltage setting to the motor model."""
    self._motor_model.set_voltage(voltage)
  def SetMotorViscousDamping(self, viscous_damping):
    """Forward the viscous damping setting to the motor model."""
    self._motor_model.set_viscous_damping(viscous_damping)
def GetTrueObservation(self):
observation = []
observation.extend(self.GetTrueMotorAngles())
observation.extend(self.GetTrueMotorVelocities())
observation.extend(self.GetTrueMotorTorques())
observation.extend(self.GetTrueBaseOrientation())
observation.extend(self.GetTrueBaseRollPitchYawRate())
return observation
def ReceiveObservation(self):
"""Receive the observation from sensors.
This function is called once per step. The observations are only updated
when this function is called.
"""
self._joint_states = self._pybullet_client.getJointStates(
self.quadruped, self._motor_id_list)
| |
"""
Inference module for PCM toolbox with main functionality for model fitting and evaluation.
@author: jdiedrichsen
"""
import numpy as np
from numpy.linalg import solve, eigh, cholesky
from numpy import sum, diag, log, eye, exp, trace, einsum
import pandas as pd
import PcmPy as pcm
from PcmPy.model import IndependentNoise, BlockPlusIndepNoise
from PcmPy.optimize import newton
def likelihood_individ(theta, M, YY, Z, X=None,
                       Noise = IndependentNoise(),
                       n_channel=1, fit_scale=False, scale_prior = 1e3, return_deriv=0):
    """Negative Log-Likelihood of the data and derivative in respect to the parameters
    Parameters:
        theta (np.array):
            Vector of (log-)model parameters - these include model, signal, and noise parameters
        M (PcmPy.model.Model):
            Model object with predict function
        YY (2d-np.array):
            NxN Matrix of outer product of the activity data (Y*Y')
        Z (2d-np.array):
            NxQ Design matrix - relating the trials (N) to the random effects (Q)
        X (np.array):
            Fixed effects design matrix - will be accounted for by ReML
        Noise (pcm.Noisemodel):
            Pcm-noise model to model block-effects (default: IndependentNoise).
            NOTE(review): this is a mutable default argument shared across
            calls - confirm the default instance is never mutated.
        n_channel (int):
            Number of channels
        fit_scale (bool):
            Fit a scaling parameter for the model (default is False)
        scale_prior (float):
            Prior variance for log-normal prior on scale parameter
        return_deriv (int):
            0: Only return negative loglikelihood (default)
            1: Return first derivative
            2: Return first and second derivative
    Returns:
        negloglike (double):
            Negative log-likelihood of the data under a model
        dLdtheta (1d-np.array):
            First derivative of negloglike in respect to the parameters
        ddLdtheta2 (2d-np.array):
            Second derivative of negloglike in respect to the parameters
    """
    N = YY.shape[0]
    Q = Z.shape[1]  # NOTE(review): Q is computed but never used below.
    n_param = theta.shape[0]
    # Get G-matrix and derivative of G-matrix in respect to parameters
    model_params = theta[range(M.n_param)]
    G,dGdtheta = M.predict(model_params)
    # Get the scale parameter and scale G by it
    if fit_scale:
        scale_param = theta[M.n_param]
        indx_scale = M.n_param # Index of scale parameter
    else:
        scale_param = 0
    Gs = G * exp(scale_param)
    # Get the noise model parameters; ``M.n_param+fit_scale`` relies on bool
    # arithmetic (fit_scale adds one slot when True).
    noise_params = theta[M.n_param+fit_scale:]
    # Apply the matrix inversion lemma. The following statement is the same as
    # V = (Z*Gs*Z' + S(noiseParam));
    # iV = pinv(V);
    Gs = (Gs + Gs.T) / 2 # Symmetrize
    Glambda, GU = eigh(Gs)
    idx = Glambda > (10e-10) # Find small eigenvalues (threshold 10e-10 == 1e-9)
    Zu = Z @ GU[:, idx]
    iS = Noise.inverse(noise_params)
    # A scalar iS means i.i.d. noise (cheap special case); a matrix iS is the
    # general case.
    if type(iS) is np.float64:
        matrixInv = (diag(1 / Glambda[idx]) / iS + Zu.T @ Zu)
        iV = (eye(N) - Zu @ solve(matrixInv, Zu.T)) * iS
    else:
        matrixInv = (diag(1 / Glambda[idx]) + Zu.T @ iS @ Zu)
        iV = iS - iS @ Zu @ solve(matrixInv,Zu.T) @ iS
    # For ReML, compute the modified inverse iVr
    if X is not None:
        iVX = iV @ X
        iVr = iV - iVX @ solve(X.T @ iVX, iVX.T)
    else:
        iVr = iV
    # Computation of (restricted) likelihood
    ldet = -2 * sum(log(diag(cholesky(iV)))) # Safe computation of log|iV|
    llik = -n_channel / 2 * ldet - 0.5 * einsum('ij,ij->',iVr, YY)
    if X is not None:
        # P/2 log(det(X'V^-1*X))
        llik -= n_channel * sum(log(diag(cholesky(X.T @ iV @X)))) #
    if fit_scale:
        llik -= scale_param**2 / (2 * scale_prior) # Add prior
    # If no derivative - exit here
    if return_deriv == 0:
        return (-llik,) # Return as tuple for consistency
    # Calculate the first derivative
    A = iVr @ Z
    B = YY @ iVr
    iVdV = []
    # Get the quantity iVdV = inv(V)dVdtheta for model parameters
    for i in range(M.n_param):
        iVdV.append(A @ dGdtheta[i,:,:] @ Z.T * exp(scale_param))
    # Get iVdV for scaling parameter
    if fit_scale:
        iVdV.append(A @ G @ Z.T * exp(scale_param))
    # Get iVdV for Noise parameters
    for j in range(Noise.n_param):
        dVdtheta = Noise.derivative(noise_params,j)
        if type(dVdtheta) is np.float64:
            iVdV.append(iVr * dVdtheta)
        else:
            iVdV.append(iVr @ dVdtheta)
    # Based on iVdV we can get the first derivative
    dLdtheta = np.zeros((n_param,))
    for i in range(n_param):
        dLdtheta[i] = -n_channel / 2 * trace(iVdV[i]) + 0.5 * einsum('ij,ij->',iVdV[i], B) # Trace(A@B.T)
    if fit_scale:
        dLdtheta[indx_scale] -= scale_param / scale_prior
    # If only first derivative, exit here
    if return_deriv == 1:
        return (-llik, -dLdtheta)
    # Calculate expected second derivative (symmetric; fill upper triangle
    # and mirror)
    d2L = np.zeros((n_param,n_param))
    for i in range(n_param):
        for j in range(i, n_param):
            d2L[i, j] = -n_channel / 2 * einsum('ij,ji->',iVdV[i],iVdV[j]) # Trace(A@B)
            d2L[j, i] = d2L[i, j]
    if fit_scale:
        d2L[indx_scale, indx_scale] -= 1 / scale_prior
    if return_deriv == 2:
        return (-llik, -dLdtheta, -d2L)
    else:
        # NOTE(review): a ValueError would be the conventional exception here.
        raise NameError('return_deriv needs to be 0, 1 or 2')
def likelihood_group(theta, M, YY, Z, X=None,
                     Noise=IndependentNoise(),
                     n_channel=1, fit_scale=True, scale_prior=1e3,
                     return_deriv=0,return_individ=False):
    """Negative Log-Likelihood of group data and derivative in respect to the parameters
    Parameters:
        theta (np.array):
            Vector of (log-)model parameters consisting of common model parameters (M.n_param or sum of M.common_param),
            participant-specific parameters (iterated by subject), unique model parameters (not in common_param),
            scale parameter, noise parameters
        M (pcm.Model):
            Model object
        YY (List of np.arrays):
            List of NxN Matrix of outer product of the activity data (Y*Y')
        Z (List of 2d-np.array):
            NxQ Design matrix - relating the trials (N) to the random effects (Q)
        X (List of np.array):
            Fixed effects design matrix - will be accounted for by ReML.
            None (the default) means no fixed effects for any subject.
        Noise (List of pcm.Noisemodel):
            Pcm-noise model per subject (a single model is broadcast to all subjects)
        n_channel (List of int):
            Number of channels per subject (a single int is broadcast to all subjects)
        fit_scale (bool):
            Fit a scaling parameter for the model (default is True)
        scale_prior (float):
            Prior variance for log-normal prior on scale parameter
        return_deriv (int):
            0: Only return negative likelihood
            1: Return first derivative
            2: Return first and second derivative
        return_individ (bool):
            return individual likelihoods instead of group likelihood
    Returns:
        negloglike:
            Negative log-likelihood of the data under a model
        dLdtheta (1d-np.array)
            First derivative of negloglike in respect to the parameters
        ddLdtheta2 (2d-np.array)
            Second derivative of negloglike in respect to the parameters
    """
    n_subj = len(YY)
    n_param = theta.shape[0]
    # Robustness fix: broadcast scalar / single-object arguments to
    # per-subject lists so the documented defaults (X=None, a single Noise
    # model, n_channel=1) do not crash on per-subject indexing below.
    if X is None:
        X = [None] * n_subj
    if not isinstance(Noise, (list, tuple, np.ndarray)):
        Noise = [Noise] * n_subj
    if not isinstance(n_channel, (list, tuple, np.ndarray)):
        n_channel = [n_channel] * n_subj
    # Determine the common parameters to the group
    if hasattr(M,'common_param'):
        common_param = M.common_param
    else:
        common_param = np.ones((M.n_param,),dtype=np.bool_)
    # Get the number of parameters
    n_common = np.sum(common_param) # Number of common params
    n_modsu = M.n_param - n_common # Number of subject-specific model params
    n_scale = int(fit_scale) # Number of scale parameters
    n_noise = Noise[0].n_param # Number of noise params
    n_per_subj = n_modsu + n_scale + n_noise # Number of parameters per subj
    # Generate the indices into the theta vector: common params first, then
    # one contiguous slice of per-subject parameters for each subject.
    indx_common = np.array(range(n_common))
    indx_subj = np.arange(n_common, n_common + n_subj * n_per_subj, n_per_subj, dtype = int)
    indx_subj = indx_subj.reshape((1,-1))
    indx_modsu = np.zeros((n_modsu,1),dtype = int) + indx_subj
    indx_scale = np.zeros((n_scale,1),dtype = int) + n_modsu + indx_subj
    indx_noise = np.array(range(n_noise),dtype = int).T + n_scale + n_modsu + indx_subj
    # preallocate the arrays
    nl = np.zeros((n_subj,))
    dFdh = np.zeros((n_subj,n_param))
    dFdhh = np.zeros((n_subj,n_param,n_param))
    # Loop over subjects and get individual likelihoods
    for s in range(n_subj):
        # Pick out the correct places of the group parameter vector for each subj
        indx_model = np.zeros((M.n_param,), dtype=int)
        indx_model[common_param]=indx_common
        indx_model[np.logical_not(common_param)]=indx_modsu[:,s]
        indx = np.concatenate([indx_model, indx_scale[:,s], indx_noise[:,s]])
        ths = theta[indx]
        # Get individual likelihood. (Cleanup: an unused ``iS = indx_scale[0,s]``
        # assignment was removed here.)
        res = likelihood_individ(ths, M, YY[s], Z[s], X[s],
                    Noise[s], n_channel[s], fit_scale = fit_scale, scale_prior = scale_prior, return_deriv = return_deriv)
        nl[s] = res[0]
        if return_deriv>0:
            dFdh[s, indx] = res[1]
        if return_deriv==2:
            ixgrid = np.ix_([s],indx,indx)
            dFdhh[ixgrid] = res[2]
    # Integrate over subjects
    if return_individ:
        ra = [nl]
    else:
        ra = [np.sum(nl, axis=0)]
    if return_deriv > 0:
        ra.append(np.sum(dFdh,axis=0)) # First derivative
    if return_deriv > 1:
        ra.append(np.sum(dFdhh,axis=0)) # Second derivative
    return ra
def fit_model_individ(Data, M, fixed_effect='block', fit_scale=False,
scale_prior = 1e3, noise_cov=None, algorithm=None,
optim_param={}, theta0=None, verbose = True):
"""Fits Models to a data set inidividually.
The model parameters are all individually fit.
Parameters:
Data (pcm.Dataset or list of pcm.Datasets):
List data set has partition and condition descriptors
M (pcm.Model or list of pcm.Models):
Models to be fitted on the data sets
fixed effect:
None, 'block', or nd-array. Default ('block') adds an intercept for each partition
fit_scale (bool):
Fit a additional scale parameter for each subject? Default is set to False.
scale_prior (float):
Prior variance for log-normal prior on scale parameter
algorithm (string):
Either 'newton' or 'minimize' - provides over-write for model specific algorithms
noise_cov:
None (i.i.d), 'block', or optional specific covariance structure of the noise
optim_param (dict):
Additional paramters to be passed to the optimizer
theta0 (list | |
<gh_stars>10-100
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import six
import os
import io
import itertools
from functools import partial
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.io import BatchSampler, DataLoader, Dataset
def create_data_loader(args, device, for_train=True):
    """Create the train (index 0) and optional eval (index 1) data loaders.
    Args:
        args: Parsed command-line arguments; must provide data/vocab path
            prefixes, language suffixes, batch_size, max_len and enable_ce.
            Note: args.src_vocab_size / args.tar_vocab_size are set here as
            a side effect.
        device: Place(s) passed to DataLoader for tensor placement.
        for_train: NOTE(review): currently unused - both loaders use
            prepare_train_input as collate_fn; confirm intended.
    Returns:
        [train_loader, eval_loader]; eval_loader is None when
        args.eval_data_prefix is not set.
    """
    data_loaders = [None, None]
    data_prefixes = [args.train_data_prefix, args.eval_data_prefix
                     ] if args.eval_data_prefix else [args.train_data_prefix]
    for i, data_prefix in enumerate(data_prefixes):
        # i == 0 is the training split: samples are length-limited/truncated
        # and batched in distributed mode; the eval split keeps full samples.
        dataset = Seq2SeqDataset(
            fpattern=data_prefix + "." + args.src_lang,
            trg_fpattern=data_prefix + "." + args.tar_lang,
            src_vocab_fpath=args.vocab_prefix + "." + args.src_lang,
            trg_vocab_fpath=args.vocab_prefix + "." + args.tar_lang,
            token_delimiter=None,
            start_mark="<s>",
            end_mark="</s>",
            unk_mark="<unk>",
            max_length=args.max_len if i == 0 else None,
            truncate=True,
            trg_add_bos_eos=True)
        (args.src_vocab_size, args.tar_vocab_size, bos_id, eos_id,
         unk_id) = dataset.get_vocab_summary()
        batch_sampler = Seq2SeqBatchSampler(
            dataset=dataset,
            use_token_batch=False,
            batch_size=args.batch_size,
            pool_size=args.batch_size * 20,
            sort_type=SortType.POOL,
            shuffle=False if args.enable_ce else True,
            distribute_mode=True if i == 0 else False)
        data_loader = DataLoader(
            dataset=dataset,
            batch_sampler=batch_sampler,
            places=device,
            collate_fn=partial(
                prepare_train_input,
                bos_id=bos_id,
                eos_id=eos_id,
                pad_id=eos_id),
            num_workers=0,
            return_list=True)
        data_loaders[i] = data_loader
    return data_loaders
def prepare_train_input(insts, bos_id, eos_id, pad_id):
    """Pad a batch of (src, trg) id sequences and build teacher-forcing inputs.

    Returns (src, src_length, trg_input, trg_length, labels) where trg_input
    drops the final target token, labels drop the first one (shifted by one
    position, with a trailing unit axis), and trg_length counts the shifted
    target.
    """
    src_seqs = [sample[0] for sample in insts]
    trg_seqs = [sample[1] for sample in insts]
    src, src_length = pad_batch_data(src_seqs, pad_id=pad_id)
    trg, trg_length = pad_batch_data(trg_seqs, pad_id=pad_id)
    labels = trg[:, 1:, np.newaxis]
    return src, src_length, trg[:, :-1], trg_length - 1, labels
def prepare_infer_input(insts, bos_id, eos_id, pad_id):
    """Pad a batch of source-only id sequences for inference."""
    padded, lengths = pad_batch_data(insts, pad_id=pad_id)
    return padded, lengths
def pad_batch_data(insts, pad_id):
    """
    Pad the instances to the max sequence length in batch, and generate the
    corresponding position data and attention bias.
    """
    lengths = np.array([len(inst) for inst in insts], dtype="int64")
    longest = int(lengths.max())
    # Start from an all-pad matrix and copy each instance over its prefix.
    padded = np.full((len(insts), longest), pad_id, dtype="int64")
    for row, inst in enumerate(insts):
        padded[row, :len(inst)] = inst
    return padded, lengths
class SortType(object):
    """Enumeration of batch-sorting strategies used by Seq2SeqBatchSampler.
    GLOBAL: sort the whole dataset by sample length before batching.
    POOL: sort by length within pools of ``pool_size`` samples.
    NONE: keep the original (possibly shuffled) order.
    """
    GLOBAL = 'global'
    POOL = 'pool'
    NONE = "none"
class Converter(object):
    """Turn a delimited sentence into a list of word ids.

    Unknown tokens map to ``unk``; ``add_beg``/``add_end`` optionally wrap
    the sequence with the begin/end marker ids.
    """

    def __init__(self, vocab, beg, end, unk, delimiter, add_beg, add_end):
        self._vocab = vocab
        self._delimiter = delimiter
        self._beg = beg
        self._end = end
        self._unk = unk
        self._add_beg = add_beg
        self._add_end = add_end

    def __call__(self, sentence):
        ids = [self._beg] if self._add_beg else []
        for token in sentence.split(self._delimiter):
            ids.append(self._vocab.get(token, self._unk))
        if self._add_end:
            ids.append(self._end)
        return ids
class ComposedConverter(object):
    """Apply one converter per field, pairwise (extra fields/converters are
    ignored by zip)."""

    def __init__(self, converters):
        self._converters = converters

    def __call__(self, fields):
        converted = []
        for field, converter in zip(fields, self._converters):
            converted.append(converter(field))
        return converted
class SentenceBatchCreator(object):
    """Accumulate samples and emit a full batch once batch_size is reached."""

    def __init__(self, batch_size):
        self._batch_size = batch_size
        self.batch = []

    def append(self, info):
        """Add one sample; return the completed batch, or None if not full."""
        self.batch.append(info)
        if len(self.batch) < self._batch_size:
            return None
        full_batch, self.batch = self.batch, []
        return full_batch
class TokenBatchCreator(object):
    """Accumulate samples so that max_len * batch_count stays within a token
    budget; emits the current batch when the next sample would exceed it."""

    def __init__(self, batch_size):
        self._batch_size = batch_size
        self.batch = []
        self.max_len = -1

    def append(self, info):
        """Add one sample; return the emitted batch, or None if it still fits."""
        candidate_max = max(self.max_len, info.max_len)
        if candidate_max * (len(self.batch) + 1) > self._batch_size:
            # Budget exceeded: emit the current batch; the new sample seeds
            # the next one.
            emitted = self.batch
            self.batch = [info]
            self.max_len = info.max_len
            return emitted
        self.max_len = candidate_max
        self.batch.append(info)
class SampleInfo(object):
    """Per-sample metadata: dataset index and field lengths (src[, trg])."""
    def __init__(self, i, lens):
        # i: sample index into the dataset; lens: [src_len] or [src_len, trg_len]
        self.i = i
        self.lens = lens
        self.max_len = lens[0] # to be consistent with the original reader
    def get_ranges(self, min_length=None, max_length=None, truncate=False):
        """Return per-field [start, end) keep-ranges, or None to drop the sample.
        A field qualifies if its length lies within [min_length, max_length]
        (the target side is allowed max_length + 2 extra, for bos/eos), or
        unconditionally when ``truncate`` is set, in which case it is clipped
        to the limit.
        """
        ranges = []
        # source
        if (min_length is None or self.lens[0] >= min_length) and (
                max_length is None or self.lens[0] <= max_length or truncate):
            # NOTE: ``truncate and max_length`` is falsy when max_length is
            # None (or 0), so the full length is kept in that case.
            end = max_length if truncate and max_length else self.lens[0]
            ranges.append([0, end])
        # target
        if len(self.lens) == 2:
            if (min_length is None or self.lens[1] >= min_length) and (
                    max_length is None or self.lens[1] <= max_length + 2 or
                    truncate):
                end = max_length + 2 if truncate and max_length else self.lens[
                    1]
                ranges.append([0, end])
        # None signals "drop this sample": at least one field failed the check.
        return ranges if len(ranges) == len(self.lens) else None
class MinMaxFilter(object):
    """Length filter wrapped around a batch creator; out-of-range samples are
    silently dropped (append returns None for them)."""
    def __init__(self, max_len, min_len, underlying_creator):
        # NOTE: the argument order is (max_len, min_len), not (min, max).
        self._min_len = min_len
        self._max_len = max_len
        self._creator = underlying_creator
    def append(self, info):
        # NOTE(review): reads ``info.min_len``, which SampleInfo does not
        # define - with a non-None min_len this would raise AttributeError;
        # confirm against the callers (all visible calls pass min_length=None).
        if (self._min_len is None or info.min_len >= self._min_len) and (
                self._max_len is None or info.max_len <= self._max_len):
            return self._creator.append(info)
    @property
    def batch(self):
        # Expose the underlying creator's pending (incomplete) batch.
        return self._creator.batch
class Seq2SeqDataset(Dataset):
    """Paired source/target text dataset converted to word-id sequences.
    Lines are read either from a single file set with tab-separated src/trg
    fields (``fpattern`` only) or from two aligned file sets (``fpattern`` +
    ``trg_fpattern``), converted to ids via the vocabularies, and length
    filtered/truncated according to min_length/max_length/truncate.
    """
    def __init__(self,
                 src_vocab_fpath,
                 trg_vocab_fpath,
                 fpattern,
                 field_delimiter="\t",
                 token_delimiter=" ",
                 start_mark="<s>",
                 end_mark="<e>",
                 unk_mark="<unk>",
                 trg_fpattern=None,
                 trg_add_bos_eos=False,
                 byte_data=False,
                 min_length=None,
                 max_length=None,
                 truncate=False):
        if byte_data:
            # The WMT16 bpe data used here seems including bytes can not be
            # decoded by utf8. Thus convert str to bytes, and use byte data
            field_delimiter = field_delimiter.encode("utf8")
            token_delimiter = token_delimiter.encode("utf8")
            start_mark = start_mark.encode("utf8")
            end_mark = end_mark.encode("utf8")
            unk_mark = unk_mark.encode("utf8")
        self._byte_data = byte_data
        self._src_vocab = self.load_dict(src_vocab_fpath, byte_data=byte_data)
        self._trg_vocab = self.load_dict(trg_vocab_fpath, byte_data=byte_data)
        # NOTE(review): bos/eos/unk ids are looked up in the *source* vocab
        # only and reused for the target side - assumes both vocabularies
        # place the special tokens at the same ids; confirm.
        self._bos_idx = self._src_vocab[start_mark]
        self._eos_idx = self._src_vocab[end_mark]
        self._unk_idx = self._src_vocab[unk_mark]
        self._field_delimiter = field_delimiter
        self._token_delimiter = token_delimiter
        self._min_length = min_length
        self._max_length = max_length
        self._truncate = truncate
        self._trg_add_bos_eos = trg_add_bos_eos
        self.load_src_trg_ids(fpattern, trg_fpattern)
    def load_src_trg_ids(self, fpattern, trg_fpattern=None):
        """Convert all lines to id sequences and record per-sample metadata."""
        src_converter = Converter(
            vocab=self._src_vocab,
            beg=self._bos_idx,
            end=self._eos_idx,
            unk=self._unk_idx,
            delimiter=self._token_delimiter,
            add_beg=False,
            add_end=False)
        trg_converter = Converter(
            vocab=self._trg_vocab,
            beg=self._bos_idx,
            end=self._eos_idx,
            unk=self._unk_idx,
            delimiter=self._token_delimiter,
            add_beg=True if self._trg_add_bos_eos else False,
            add_end=True if self._trg_add_bos_eos else False)
        converters = ComposedConverter([src_converter, trg_converter])
        self._src_seq_ids = []
        self._trg_seq_ids = []
        self._sample_infos = []
        slots = [self._src_seq_ids, self._trg_seq_ids]
        for i, line in enumerate(self._load_lines(fpattern, trg_fpattern)):
            fields = converters(line)
            lens = [len(field) for field in fields]
            sample = SampleInfo(i, lens)
            field_ranges = sample.get_ranges(self._min_length,
                                             self._max_length, self._truncate)
            # Samples outside the length limits yield None and are dropped.
            if field_ranges:
                for field, field_range, slot in zip(fields, field_ranges,
                                                    slots):
                    slot.append(field[field_range[0]:field_range[1]])
                self._sample_infos.append(sample)
    def _load_lines(self, fpattern, trg_fpattern=None):
        """Yield per-line field lists from one or two (aligned) file sets."""
        fpaths = glob.glob(fpattern)
        fpaths = sorted(fpaths) # TODO: Add custum sort
        assert len(fpaths) > 0, "no matching file to the provided data path"
        # Byte mode reads raw bytes and strips b"\n"; text mode decodes utf8.
        (f_mode, f_encoding,
         endl) = ("rb", None, b"\n") if self._byte_data else ("r", "utf8",
                                                              "\n")
        if trg_fpattern is None:
            for fpath in fpaths:
                with io.open(fpath, f_mode, encoding=f_encoding) as f:
                    for line in f:
                        fields = line.strip(endl).split(self._field_delimiter)
                        yield fields
        else:
            # separated source and target language data files
            # assume we can get aligned data by sort the two language files
            # TODO: Need more rigorous check
            trg_fpaths = glob.glob(trg_fpattern)
            trg_fpaths = sorted(trg_fpaths)
            assert len(fpaths) == len(
                trg_fpaths
            ), "the number of source language data files must equal \
with that of source language"
            for fpath, trg_fpath in zip(fpaths, trg_fpaths):
                with io.open(fpath, f_mode, encoding=f_encoding) as f:
                    with io.open(
                            trg_fpath, f_mode, encoding=f_encoding) as trg_f:
                        for line in zip(f, trg_f):
                            fields = [field.strip(endl) for field in line]
                            yield fields
    @staticmethod
    def load_dict(dict_path, reverse=False, byte_data=False):
        """Load a vocab file (one token per line) into a word->id dict, or
        id->word when ``reverse`` is True."""
        word_dict = {}
        (f_mode, f_encoding,
         endl) = ("rb", None, b"\n") if byte_data else ("r", "utf8", "\n")
        with io.open(dict_path, f_mode, encoding=f_encoding) as fdict:
            for idx, line in enumerate(fdict):
                if reverse:
                    word_dict[idx] = line.strip(endl)
                else:
                    word_dict[line.strip(endl)] = idx
        return word_dict
    def get_vocab_summary(self):
        """Return (src_vocab_size, trg_vocab_size, bos_id, eos_id, unk_id)."""
        return len(self._src_vocab), len(
            self._trg_vocab), self._bos_idx, self._eos_idx, self._unk_idx
    def __getitem__(self, idx):
        # (src_ids, trg_ids) when targets were loaded, else src_ids only.
        return (self._src_seq_ids[idx], self._trg_seq_ids[idx]
                ) if self._trg_seq_ids else self._src_seq_ids[idx]
    def __len__(self):
        return len(self._sample_infos)
class Seq2SeqBatchSampler(BatchSampler):
def __init__(self,
dataset,
batch_size,
pool_size=10000,
sort_type=SortType.NONE,
min_length=None,
max_length=None,
shuffle=False,
shuffle_batch=False,
use_token_batch=False,
clip_last_batch=False,
distribute_mode=True,
seed=0):
for arg, value in locals().items():
if arg != "self":
setattr(self, "_" + arg, value)
self._random = np.random
self._random.seed(seed)
# for multi-devices
self._distribute_mode = distribute_mode
self._nranks = ParallelEnv().nranks
self._local_rank = ParallelEnv().local_rank
self._device_id = ParallelEnv().dev_id
def __iter__(self):
# global sort or global shuffle
if self._sort_type == SortType.GLOBAL:
infos = sorted(
self._dataset._sample_infos, key=lambda x: x.max_len)
else:
if self._shuffle:
infos = self._dataset._sample_infos
self._random.shuffle(infos)
else:
infos = self._dataset._sample_infos
if self._sort_type == SortType.POOL:
reverse = True
for i in range(0, len(infos), self._pool_size):
# to avoid placing short next to long sentences
reverse = False # not reverse
infos[i:i + self._pool_size] = sorted(
infos[i:i + self._pool_size],
key=lambda x: x.max_len,
reverse=reverse)
batches = []
batch_creator = TokenBatchCreator(
self.
_batch_size) if self._use_token_batch else SentenceBatchCreator(
self._batch_size * self._nranks)
batch_creator = MinMaxFilter(self._max_length, self._min_length,
batch_creator)
for info in infos:
batch = batch_creator.append(info)
if batch is not None:
batches.append(batch)
if not self._clip_last_batch and len(batch_creator.batch) != 0:
batches.append(batch_creator.batch)
if self._shuffle_batch:
self._random.shuffle(batches)
if not self._use_token_batch:
# when producing batches according to sequence number, to confirm
# neighbor batches which would be feed and run parallel have similar
# length (thus similar computational cost) after shuffle, we as take
# them as a whole when shuffling and split here
batches = [[
batch[self._batch_size * i:self._batch_size * (i + 1)]
for i in range(self._nranks)
] for batch in batches]
batches = list(itertools.chain.from_iterable(batches))
# for multi-device
for batch_id, batch in enumerate(batches):
if not | |
<reponame>SwankSwashbucklers/website-template
"""
Build Nick's prefered default project starting point for new websites. The
project will precompile sass files for css, and use bottle as the web
framework. It will be geared towards deployment on a cherrypy server using a
nginx reverse proxy.
Requirements:
- Python 3.x
- CoffeeScript
- Sass
- Git
- Inkscape
- Imagemagick
Copyright (c) 2015, <NAME>.
License: BSD (see LICENSE for details)
"""
################################################################################
##### Command Line Interface ###################################################
################################################################################
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
# Command-line interface: positional NAME plus optional path/favicon/resources.
parser = ArgumentParser(
    formatter_class=ArgumentDefaultsHelpFormatter,
    description=__doc__ )
# Positional: project name (defaults to "untitled").
parser.add_argument("name",
    type=str,
    metavar="NAME",
    nargs="?",
    default="untitled",
    help="the name of the project for the new website." )
# Optional: destination directory for the generated project.
parser.add_argument("-p", "--path",
    type=str,
    default=os.getcwd(),
    help="the path to the desired location of the new project." )
# Optional: square SVG used to generate the favicon set.
parser.add_argument("-f", "--favicon",
    type=str,
    help="location of image file to be used as the favicon for the project. "
    "If an absolute path is not given, location will be assumed to be relative "
    "to the location of this script. It is required to provide a square svg "
    "file for use here." )
# Optional: extra files copied into the generated project.
parser.add_argument("-r", "--resources",
    type=str,
    nargs='+',
    help="locations of any additional resources to be added to the project. If "
    "an absolute path is not given, location will be assumed to be relative to "
    "the location of this script." )
args = parser.parse_args()
################################################################################
##### Overrides ################################################################
################################################################################
# Source of a helper module that is written to disk as overrides.py and then
# imported back below. It customizes string.Template (adds section-header
# expansion and a populate() helper) and wraps subprocess Popen/call.
OVERRIDES = """\
from string import Template
from re import compile
class TemplateWrapper():
    def __init__(self, cls):
        PYTHON_LL = 80
        HTML_LL = 120
        self.cls = cls
        self.headers = [
            ( # Primary python file header template
                compile(r'\$ph{(.*?)}'),
                lambda x: "\\n\\n{1}\\n##### {0} {2}\\n{1}\\n".format(
                    x.upper(), '#'*PYTHON_LL, '#'*(PYTHON_LL-len(x)-7) )
            ),
            ( # Secondary python file header template
                compile(r'\$sh{(.*?)}'),
                lambda x: "\\n### {0} {1}".format(
                    x, '#'*(PYTHON_LL-len(x)-5) )
            ),
            ( # HTML file header template
                compile(r'\$wh{(.*?)}'),
                lambda x: "<!-- ***** {0} {1} -->".format(
                    x, '*'*(HTML_LL-len(x)-16) )
            )
        ]
    def __call__(self, template):
        for header in self.headers:
            ptn, tpl = header
            for match in ptn.finditer(template):
                replacements = ( match.group(0), tpl(match.group(1)) )
                template = template.replace(*replacements)
        template_obj = self.cls(template)
        template_obj.populate = self.populate
        return template_obj
    @staticmethod
    def populate(template, filepath, **kwargs):
        for key, value in kwargs.items():
            if isinstance(value, list):
                kwargs[key] = "\\n".join(
                    [ t[0].safe_substitute(**t[1]) for t in value ]
                )
        try:
            with open(filepath, 'w') as f:
                f.write(template.safe_substitute(**kwargs))
        except Exception as exception:
            raise exception
Template = TemplateWrapper(Template)
from subprocess import Popen, call, DEVNULL, STDOUT, PIPE
from sys import executable
def sPopen(*args):
    command, shell = list(args), True
    if command[0] == 'python':
        command[0] = executable
        shell = False
    if os.name == 'nt':
        from subprocess import CREATE_NEW_CONSOLE
        Popen( command, shell=shell, creationflags=CREATE_NEW_CONSOLE )
    else:
        Popen( command, shell=shell )
def sCall(*args):
    command, shell = list(args), True
    if command[0] == 'python':
        command[0] = executable
        shell = False
    call( command, shell=shell, stdout=DEVNULL, stderr=STDOUT )
"""
# Materialize the helper module, then import from it (mid-file import is
# intentional: the module must exist on disk first).
with open('overrides.py', 'w') as f:
    f.write(OVERRIDES)
from overrides import sPopen, sCall
from overrides import TemplateWrapper
from string import Template
Template = TemplateWrapper(Template)
################################################################################
##### Templates ################################################################
################################################################################
# Sass partial: base <body> rules; $main-font-stack is defined by the module
# template below.
BASE_PARTIAL_SASS_TEMPLATE = Template("""\
body.main {
@include fixpos(0);
margin: 0;
padding: 0;
font-size: 16px;
-webkit-font-smoothing: antialiased;
font-family: $main-font-stack; }
""" )
# Sass module: declares the font stack variable consumed by the partial above.
BASE_MODULE_SASS_TEMPLATE = Template("""\
$main-font-stack: 'Lato', sans-serif;
""" )
# Entry-point stylesheet: only imports the combined "_all" partial.
STYLES_SASS_TEMPLATE = Template("""\
@import "all";
""" )
# Python script template: downloads third-party Sass mixin libraries from
# GitHub into the generated project (best-effort; failures are reported).
UPDATE_SASS_TEMPLATE = Template("""\
from urllib.request import urlopen
from shutil import copyfileobj
import os
RESOURCES = (
[
{
"name": "_flex-box_mixins.scss",
"url":( "https://raw.githubusercontent.com/"
"mastastealth/sass-flex-mixin/master/_flexbox.scss" )
},
{
"name": "_media-query_mixins.scss",
"url":( "https://raw.githubusercontent.com/"
"paranoida/sass-mediaqueries/master/_media-queries.scss" )
},
{
"name": "_general_mixins.scss",
"url":( "https://raw.githubusercontent.com/"
"SwankSwashbucklers/some-sassy-mixins/master/mixins.scss" )
}
]
)
def populate_resource(resource_name, resource_url):
try:
with urlopen(resource_url) as response, \\
open(resource_name, 'wb') as f:
copyfileobj(response, f)
print("Successfully populated '{}'".format(resource_name))
except Exception as e:
message = "Could not populate resource" \\
if not (os.path.isfile(resource_name)) \\
else "Unable to update resource"
print("{}: {}\\n from url: {}\\nException: {}".format(
message, resource_name, resource_url, e ) )
print("Updating external sass resources")
for resource in RESOURCES:
populate_resource(resource['name'], resource['url'])
""" )
# Shared HTML <head> template; the named <meta> tags ("favicon_elements",
# "open_graph", "stylesheets") act as injection points for the generator.
HEAD_TEMPLATE = Template("""\
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0,
maximum-scale=1.0, user-scalable=no">
<title>{{title}}</title>
<meta name="description" content="{{description}}">
<meta name="author" content="<NAME>">
<meta name="favicon_elements">
<meta name="open_graph">
<meta name="stylesheets">
</head>
""" )
# Root page (bottle template); $title/$description are filled at generation time.
INDEX_TEMPLATE = Template("""\
<!DOCTYPE html>
<html lang="en">
% include('~head.tpl', title='$title', description='$description')
<body>
</body>
</html>
""" )
# Default POST route appended to the generated app (with an easter egg).
ROUTES_TEMPLATE = Template("""\
@route('/', method='POST')
def api():
if request.POST.get("v") == 'vendetta':
return \"""\\
Evey: Who are you?
V: Who? Who is but the form following the function of what, and what
I am is a man in a mask.
Evey: Well I can see that.
V: Of course you can. I'm not questioning your powers of observation;
I'm merely remarking upon the paradox of asking a masked man who
he is.
Evey: Oh. Right.
V: But on this most auspicious of nights, permit me then, in lieu of
the more commonplace sobriquet, to suggest the character of this
dramatis persona.
V: Voila! In view, a humble vaudevillian veteran cast vicariously as
both victim and villain by the vicissitudes of Fate. This visage,
no mere veneer of vanity, is a vestige of the vox populi, now
vacant, vanished. However, this valourous visitation of a bygone
vexation stands vivified and has vowed to vanquish these venal and
virulent vermin vanguarding vice and vouchsafing the violently
vicious and voracious violation of volition! The only verdict is
vengeance; a vendetta held as a votive, not in vain, for the value
and veracity of such shall one day vindicate the vigilant and the
virtuous. Verily, this vichyssoise of verbiage veers most verbose,
so let me simply add that it's my very good honour to meet you and
you may call me V.
\"""
return load_root()
""" )
# robots.txt permitting all crawlers.
ROBOTS_TEMPLATE = Template("""\
User-agent: *
Disallow:
""" )
BUILD_PY_TEMPLATE = Template("""\
$ph{Command Line Interface}
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from tempfile import gettempdir
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument("-p", "--path",
type=str,
help="the path to the desired location of the generated site")
parser.add_argument("-d", "--deploy",
action="store_true",
help="package site for movement to deployment server. Default path is the"
"current working directory, but the path flag will override that value" )
parser.add_argument("-r", "--reuse",
action="store_true",
help="if an already built website exists at the targeted path, attempt to"
"reuse already present resources (i.e. images, favicon elements and other"
"static resources)" )
args = parser.parse_args()
if args.path is None:
if args.deploy:
args.path = os.getcwd()
else:
args.path = gettempdir()
$ph{Overrides}
${override_str}
$ph{Templates}
APP_PY_TEMPLATE = Template(\"""\\
from bottle import run, route, get, post, error
from bottle import static_file, template, request
from bottle import HTTPError
$ph{Command Line Interface}
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath
import os
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument('-d', '--deploy',
action='store_true',
help='Run server for deployment' )
parser.add_argument('-i', '--ip',
type=str,
default="127.0.0.1",
help='ip to run the server against, default localhost' )
parser.add_argument('-p', '--port',
type=str,
default="8080",
help='port to run server on' )
args = parser.parse_args()
# change working directory to script directory
os.chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
$ph{Main Site Routes}
${main_routes}
$ph{API and Additional Site Routes}
${api_routes}
$ph{Static Routes}
${static_routes}
$sh{Favicon Routes}
${favicon_routes}
$sh{Image Routes}
${image_routes}
$sh{Font Routes}
${font_routes}
$sh{Stylesheet Routes}
${css_routes}
$sh{Javascript Routes}
${js_routes}
$ph{Error Routes}
@error(404)
def error404(error):
return 'nothing to see here'
$ph{Run Server}
if args.deploy:
run(host=args.ip, port=args.port, server='cherrypy') #deployment
else:
run(host=args.ip, port=args.port, debug=True, reloader=True) #development
\""" )
MAIN_ROUTE_TEMPLATE = Template(\"""\\
@route('/${path}')
def ${method_name}():
return template('${template}', request=request, template='${template}')
\""" )
STATIC_ROUTE_TEMPLATE = Template(\"""\\
@get('/${path}')
def load_resource():
return static_file('${file}', root='${root}')
\""" )
WATCH_SASS_SCRIPT = Template(\"""\\
from sys import argv
from shutil import rmtree
from subprocess import Popen
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath, isdir, isfile
import os
# change working directory to script directory
os.chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
command = "sass --watch"
for x in range(1, len(argv)):
command += " {0}.scss:../../www/static/css/{0}.css".format(argv[x])
p = Popen(command, shell=True)
try:
while True:
pass
except KeyboardInterrupt:
p.kill()
if isfile("_all.scss"): os.remove("_all.scss")
if isdir(".sass-cache"): rmtree(".sass-cache")
os.remove("watch.py") # argv[0] contains full path
\""" )
$ph{Script Body}
from os.path import relpath, normpath, join, isfile, isdir, splitext
from shutil import copy, copyfileobj, rmtree
from urllib.request import urlopen
from time import sleep
from re import match
from sys import exit
SCRIPT_DIR = os.getcwd()
PROJECT_NAME = relpath(SCRIPT_DIR, "..")
STATIC_ROUTE = lambda p, f, r: \\
( STATIC_ROUTE_TEMPLATE, { "path": p, "file": f, "root": r } )
MAIN_ROUTE = lambda p, m, t: \\
( MAIN_ROUTE_TEMPLATE, { "path": p, "method_name": m, "template": t } )
def fatal_exception(exception, message="", cleanup=True):
print("*******SCRIPT FAILED*******")
if message: print(message)
print("Exception: ", exception)
if cleanup:
try:
os.chdir(args.path)
rmtree('www')
except Exception as e:
print(e)
exit(1)
def migrate_files(directory, destination):
src_path = join(SCRIPT_DIR, directory)
if not isdir(destination): os.makedirs(destination)
for root, dirs, files in os.walk(src_path):
for dirname in dirs:
if dirname.startswith('!') or dirname in ['.DS_STORE']:
dirs.remove(dirname)
for filename in files:
if not filename.startswith('!'):
if not isfile(filename): #added for the reuse flag
copy(join(root, filename), join(destination, filename))
if not filename.startswith('~'):
yield normpath(join(relpath(root, src_path),
filename) ).replace('\\\\', '/')
def migrate_views():
return ([ MAIN_ROUTE("", "load_root", "index") ] +
| |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import collections
import numpy as np
import os
import pandas as pd
from scipy.misc import logsumexp
from scipy.special import gammaln
import scipy.stats
from sklearn.utils import sparsefuncs
import sys
import tenkit.stats as tk_stats
import cellranger.analysis.io as analysis_io
import cellranger.analysis.clustering as cr_clustering
import cellranger.analysis.constants as analysis_constants
import cellranger.io as cr_io
# Quantile of the method-of-moments dispersion estimates used as the sSeq
# shrinkage target zeta (see compute_sseq_params below).
SSEQ_ZETA_QUANTILE = 0.995
# Minimal result container; `data` carries the differential-expression payload.
DIFFERENTIAL_EXPRESSION = collections.namedtuple('DIFFERENTIAL_EXPRESSION', ['data'])
def estimate_size_factors(x):
    """Compute per-cell size factors from a (feature x cell) sparse count matrix.

    A cell's size factor is its total count divided by the median total count
    over all cells; it reflects cell RNA content and GEM-to-GEM technical
    variance.

    Args:
        x: Sparse matrix (csc) of counts (feature x cell).

    Returns:
        np.ndarray of float64 size factors, one per cell.
    """
    totals_per_cell = np.squeeze(np.asarray(x.sum(axis=0)))
    median_total = np.median(totals_per_cell)
    return totals_per_cell.astype(np.float64) / median_total
def compute_sseq_params(x, zeta_quantile=SSEQ_ZETA_QUANTILE):
    """ Compute global parameters for the sSeq differential expression method.
    The key parameters are the shrunken feature-wise dispersions.
    This method was published in:
    <NAME>, et al. (2013) Shrinkage estimation of dispersion in Negative Binomial models for RNA-seq experiments with small sample size.
    Bioinformatics. 29: 1275-1282. doi: 10.1093/bioinformatics/btt143
    Args:
    x - Sparse matrix (csc) of counts (feature x cell)
    zeta_quantile (float) - Quantile of method-of-moments dispersion estimates to
    use as the shrinkage target zeta.
    Returns:
    A dictionary containing the sSeq parameters and some diagnostic info.
    """
    # Number of cells
    N = x.shape[1]
    # Number of features
    G = x.shape[0]
    # Estimate size factors and normalize the matrix for quick mean/var calcs
    size_factors = estimate_size_factors(x)
    # Cast to float to prevent truncation of 1 -> 0 for size factors < 1
    # NOTE(review): scipy.sparse is never imported explicitly in this file; it
    # appears to be loaded only as a side effect of other imports (e.g.
    # sklearn) -- confirm, and prefer an explicit `import scipy.sparse`.
    x_norm = scipy.sparse.csc_matrix(x, dtype=np.float64, copy=True)
    sparsefuncs.inplace_column_scale(x_norm, 1.0 / size_factors)
    # Estimate featurewise mean, variance, and dispersion by the method of moments
    # assuming that each feature follows a negative-binomial distribution.
    mean_g = np.squeeze(np.asarray(x_norm.mean(axis=1, dtype=np.float64)))
    # V[X] = E[X^2] - E[X]^2
    mean_sq_g = np.squeeze(np.asarray(x_norm.multiply(x_norm).mean(axis=1, dtype=np.float64)))
    var_g = mean_sq_g - np.square(mean_g)
    # Method of moments estimate of feature-wise dispersion (phi)
    # Only use features with non-zero variance in the following estimation
    use_g = var_g > 0
    phi_mm_g = np.zeros(G)
    # MoM dispersion, clamped at 0:
    #   phi = (N * var - mu * sum(1/sf)) / (mu^2 * sum(1/sf))
    phi_mm_g[use_g] = np.maximum(0, (float(N) * var_g[use_g] - mean_g[use_g] * np.sum(1.0 / size_factors)) /
                                 (np.square(mean_g[use_g]) * np.sum(1.0 / size_factors)))
    # Estimate the optimal global target dispersion (zeta_hat).
    # The true optimal zeta is that which minimizes the MSE vs the true dispersions.
    # The featurewise dispersions will be "shrunk" towards our estimate of zeta.
    # Use a high quantile of the MoM dispersion as our shrinkage target
    # per the rule of thumb in Yu, et al.
    zeta_hat = np.nanpercentile(phi_mm_g[use_g], 100.0 * zeta_quantile)
    # Compute delta, the optimal shrinkage towards zeta_hat
    # This defines a linear function that shrinks the MoM dispersion estimates:
    # the ratio of the spread of the MoM estimates around their mean (over G-1)
    # to their spread around zeta_hat (over G-2).
    mean_phi_mm_g = np.mean(phi_mm_g[use_g])
    delta = (np.sum(np.square(phi_mm_g[use_g] - mean_phi_mm_g)) / float(G - 1)) / \
        (np.sum(np.square(phi_mm_g[use_g] - zeta_hat)) / float(G - 2))
    # Compute the shrunken dispersion estimates
    # Interpolate between the MoM estimates and zeta_hat by delta
    phi_g = np.full(G, np.nan)
    if np.any(phi_mm_g[use_g] > 0):
        phi_g[use_g] = (1 - delta) * phi_mm_g[use_g] + delta * zeta_hat
    else:
        # Every usable MoM dispersion is zero -> shrunken estimates are zero too.
        phi_g[use_g] = 0.0
    # eval_zeta / eval_asd / asd_slope are diagnostic placeholders (always None
    # in this implementation).
    return {
        'N': N,                      # number of cells
        'G': G,                      # number of features
        'size_factors': size_factors,
        'mean_g': mean_g,
        'var_g': var_g,
        'use_g': use_g,              # mask of features with non-zero variance
        'phi_mm_g': phi_mm_g,        # raw method-of-moments dispersions
        'eval_zeta': None,
        'eval_asd': None,
        'asd_slope': None,
        'zeta_hat': zeta_hat,        # shrinkage target
        'delta': delta,              # shrinkage weight
        'phi_g': phi_g,              # shrunken dispersions
    }
def neg_bin_log_pmf(k, mu, phi):
    """Log-PMF of a negative binomial with mean ``mu`` and dispersion ``phi``.

    Uses the (mu, phi) parameterization: r = 1/phi, success probability
    r / (r + mu).

    Args:
        k (int): NB random variable (count).
        mu (float): Mean.
        phi (float): Dispersion.

    Returns:
        The log of the PMF evaluated at k.
    """
    r = 1.0 / phi
    log_binom = gammaln(r + k) - (gammaln(r) + gammaln(k + 1))
    log_kernel = k * np.log(mu / (r + mu)) + r * np.log(r / (r + mu))
    return log_binom + log_kernel
def nb_exact_test(x_a, x_b, size_factor_a, size_factor_b, mu, phi):
    """ Compute p-value for a pairwise exact test using the negative binomial.
    Args:
       x_a (int) - Total count for a single feature in group A
       x_b (int) - Total count for a single feature in group B
       size_factor_a (float) - Sum of size factors for group A
       size_factor_b (float) - Sum of size factors for group B
       mu (float) - Common mean count for this feature
       phi (float) - Common dispersion for this feature
    Returns:
       p-value (float); the probability that a random pair of counts under the null hypothesis is more extreme
       than the observed pair of counts. """
    size_factor_a = float(size_factor_a)
    size_factor_b = float(size_factor_b)
    mu = float(mu)
    phi = float(phi)
    # Degenerate cases carry no information: no counts at all, zero
    # dispersion, or an empty group.
    if (x_a + x_b) == 0:
        return 1.0
    if phi == 0:
        return 1.0
    if size_factor_a == 0 or size_factor_b == 0:
        return 1.0
    # Enumerate every possible split of the observed total between the groups.
    all_x_a = np.arange(0, 1 + x_a + x_b, 1)
    all_x_b = np.arange(x_a + x_b, -1, -1)

    def log_prob(x, size_factor):
        # NB log-likelihood of a group count, scaled by the group size factor.
        return neg_bin_log_pmf(x, size_factor * mu, phi / size_factor)

    log_p_obs = log_prob(x_a, size_factor_a) + log_prob(x_b, size_factor_b)
    log_p_all = log_prob(all_x_a, size_factor_a) + log_prob(all_x_b, size_factor_b)
    # Two-sided p-value: total mass of splits at most as likely as the
    # observed one, normalized by the total mass. Reuse the comparison mask
    # instead of recomputing `log_p_all <= log_p_obs` a second time (the
    # original evaluated the same boolean expression twice).
    more_extreme = log_p_all <= log_p_obs
    if np.sum(more_extreme) == 0:
        return 0.0
    return np.exp(logsumexp(log_p_all[more_extreme]) - logsumexp(log_p_all))
def nb_asymptotic_test(x_a, x_b, size_factor_a, size_factor_b, mu, phi):
    """ Compute p-value for a pairwise exact test using a fast beta approximation
    to the conditional joint distribution of (x_a, x_b).

    Robinson and Smyth (2008), Biostatistics 9, 321-332: "It is based a
    method-of-moments gamma approximation to the negative binomial
    distribution." Adapted from the "edgeR" implementation:
    https://github.com/Bioconductor-mirror/edgeR/blob/1ab290c9585335cf99bb41f50cfce2ce4d40f907/R/exactTestBetaApprox.R

    Vectorized; always returns an array, even for scalar inputs.

    Args:
        x_a (int/np.array) - Total count for a single feature in group A
        x_b (int/np.array) - Total count for a single feature in group B
        size_factor_a (float) - Sum of size factors for group A
        size_factor_b (float) - Sum of size factors for group B
        mu (float/np.array) - Common mean count for this feature
        phi (float/np.array) - Common dispersion for this feature

    Returns:
        np.array of p-values.
    """
    x_a = np.array(x_a, ndmin=1, copy=False)
    x_b = np.array(x_b, ndmin=1, copy=False)
    mu = np.array(mu, ndmin=1, copy=False)
    phi = np.array(phi, ndmin=1, copy=False)
    # Beta-approximation shape parameters for the conditional distribution.
    alpha = size_factor_a * mu / (1 + phi * mu)
    beta = (size_factor_b / size_factor_a) * alpha
    total = x_a + x_b
    median = scipy.stats.beta.median(alpha, beta)
    below = ((x_a + 0.5) / total) < median
    above = ~below
    p_values = np.empty(len(x_a))
    # Two-sided p-value: double the tail containing the observation.
    p_values[below] = 2 * scipy.stats.beta.cdf(
        (x_a[below] + 0.5) / total[below], alpha[below], beta[below])
    # If X ~ Beta(a, b) then 1 - X ~ Beta(b, a); mirroring the distribution
    # for the upper tail avoids the asymmetry in beta.cdf.
    p_values[above] = 2 * (scipy.stats.beta.cdf(
        (x_b[above] + 0.5) / total[above], beta[above], alpha[above]))
    return p_values
def adjust_pvalue_bh(p):
    """Benjamini-Hochberg multiple-testing adjustment of p-values.

    Computes q = p * N / k (N = number of tests, k = rank of the p-value),
    enforcing monotonicity from the largest p-value downward and capping at 1.

    Args:
        p (np.array): Raw p-values.

    Returns:
        np.array of adjusted p-values, in the original input order.
    """
    order_desc = np.argsort(p)[::-1]
    ranks_desc = np.arange(len(p), 0, -1)
    scaled = (float(len(p)) / ranks_desc) * p[order_desc]
    q_desc = np.minimum(1, np.minimum.accumulate(scaled))
    # Undo the descending sort to restore the caller's ordering.
    return q_desc[np.argsort(order_desc)]
def sseq_differential_expression(x, cond_a, cond_b, sseq_params, big_count=900):
""" Run sSeq pairwise differential expression test.
Args:
x - Sparse matrix (csc) of counts (feature x cell)
cond_a (np.array(int)): Indices of cells in group A
cond_b (np.array(int)): Indices of cells in group B
sseq_params (dict): Precomputed global parameters
big_count (int): Use asymptotic approximation if both counts > this
Returns:
A pd.DataFrame with DE results for group A relative to group B """
x_a = x[:, cond_a]
x_b = x[:, cond_b]
# Number of features
G = x.shape[0]
# Size factors
size_factor_a = np.sum(sseq_params['size_factors'][cond_a])
size_factor_b = np.sum(sseq_params['size_factors'][cond_b])
# Compute p-value for each feature
p_values = np.ones(G)
feature_sums_a = np.squeeze(np.asarray(x_a.sum(axis=1)))
feature_sums_b = np.squeeze(np.asarray(x_b.sum(axis=1)))
big = tk_stats.numpy_logical_and_list([sseq_params['use_g'], feature_sums_a > big_count, feature_sums_b > big_count])
small = np.logical_and(sseq_params['use_g'], np.logical_not(big))
sys.stderr.write("Computing %d exact tests and %d asymptotic tests.\n" %
(np.sum(small), np.sum(big)))
# Compute exact test for small-count features
for i in np.flatnonzero(small):
p_values[i] = nb_exact_test(feature_sums_a[i], feature_sums_b[i],
size_factor_a, size_factor_b,
sseq_params['mean_g'][i],
sseq_params['phi_g'][i])
# Compute asymptotic approximation for big-count features
p_values[big] = nb_asymptotic_test(feature_sums_a[big],
feature_sums_b[big],
size_factor_a, size_factor_b,
sseq_params['mean_g'][big],
sseq_params['phi_g'][big])
# Adjust p-values for multiple testing correction
# Only adjust the features that were actually tested
| |
sling_knife()
elif response == "C" or response == "c":
print("After choosing your two items, you continue down the path.")
time.sleep(3)
print("You hear the noise of passing cars. Relief washes over you as you run towards the noise in hopes of getting a lift home.")
time.sleep(5)
print("Suddenly you hear footsteps. You stop, wondering where it's coming from.")
time.sleep(4)
print("The sound of cars completely stops. That ever so familiar gagging smell!!!")
time.sleep(4)
print("You shine the torch up, what you think is a tree, it's no tree. . .")
time.sleep(3)
print("It is a vile, crusty creature with a head that looks like a speaker! You realise all along this is the creature that has been mimicking the noise!")
time.sleep(5)
sling_bat()
else:
print("Invalid response")
path_2_bag_half()
def _path_2_creature_reveal(weapon_scene):
    """Play the shared path-two narration (the fake car noise and the
    speaker-headed creature reveal), then hand off to the fight scene for the
    chosen pair of items. Extracted because all three item choices printed the
    same six lines with the same pauses."""
    narration = (
        ("After choosing your two items, you continue down the path.", 3),
        ("You hear the noise of passing cars. Relief washes over you as you run towards the noise in hopes of getting a lift home.", 5),
        ("Suddenly you hear footsteps. You stop, wondering where it's coming from.", 4),
        ("The sound of cars completely stops. That ever so familiar gagging smell!!!", 4),
        ("You shine the torch up, what you think is a tree, it's no tree. . .", 3),
        ("It is a vile, crusty creature with a head that looks like a speaker! You realise all along this is the creature that has been mimicking the noise!", 5),
    )
    for line, pause in narration:
        print(line)
        time.sleep(pause)
    weapon_scene()
def path_2_bag():
    """Let the player pick two of the three items found in the bag, then run
    the creature encounter with the matching weapon combination."""
    print("You search the bag and find: a hunter's slingshot, batteries and a flip knife.")
    time.sleep(2.5)
    response = input("You can only hold 2 items, which do you pick? A) Knife & Batteries B) Slingshot & Knife C) Slingshot & Batteries ")
    if response in ("A", "a"):
        _path_2_creature_reveal(knife_bat)
    elif response in ("B", "b"):
        _path_2_creature_reveal(sling_knife)
    elif response in ("C", "c"):
        _path_2_creature_reveal(sling_bat)
    else:
        print("Invalid response")
        # Re-prompt without repeating the bag description.
        path_2_bag_half()
def path_2_forf_half():
    """Re-ask the fight-or-flight question after an invalid answer."""
    choice = input("What do you do? A) FIGHT! B) Flight ")
    if choice in ("A", "a"):
        # Fighting with only a torch always ends the game.
        for line in ("You chose to fight. All you have on you is a torch.",
                     "You throw it at the monster's head...",
                     "Of course it does nothing. You decide to accept fate."):
            print(line)
            time.sleep(3)
        print("IT'S CLAWS COME DOWN! GAME OVER! ")
    elif choice in ("B", "b"):
        print("You decide to flee and escape before the monster can get you.")
        time.sleep(4)
        print("After running for some time, you stumble upon a broken bag. ")
        time.sleep(3)
        path_2_bag()
    else:
        print("Invalid response")
        path_2_forf_half()
def path_2_forf():
    """First fight-or-flight prompt after the creature appears on path two."""
    choice = input("Fight or flight sets in. What do you do? A) FIGHT! B) Flight ")
    if choice in ("A", "a"):
        # Fighting with only a torch always ends the game.
        for line in ("You chose to fight. All you have on you is a torch.",
                     "You throw it at the monster's head...",
                     "Of course it does nothing. You decide to accept fate."):
            print(line)
            time.sleep(3)
        print("IT'S CLAWS COME DOWN! GAME OVER! ")
    elif choice in ("B", "b"):
        print("You decide to flee and escape before the monster can get you.")
        time.sleep(3)
        print("After running for some time, you stumble upon a broken bag. ")
        time.sleep(3)
        path_2_bag()
    else:
        print("Invalid response")
        path_2_forf_half()
def path_1_mother_half():
    """Re-prompt the two-path choice after an invalid answer.

    Mirrors path_1_mother without repeating its intro narration.
    """
    response = input("Pick a path: A) Path one B) Path two ")
    if response == "A" or response == "a":
        print("You come to a dark creepy cabin and decide to venture in.")
        time.sleep(3)
        # Consistency fix: added the missing full stop so the line matches the
        # identical sentence in path_1_mother.
        print("As you slowly open the door, it creaks. What you see shocks and disgusts you.")
        time.sleep(5.5)
        print("'IT'S JAY!' you exclaim! He's sitting there in the cabin playing scary games on his PS4. Phew!!! You decide to stay and play games.")
        time.sleep(3.5)
        print("Game End.")
    elif response == "B" or response == "b":
        # Typo fix: "geting" -> "getting" (matches the wording in path_1_mother).
        print("The crying is getting louder, almost deafening, as your eyes dart around hoping to find your mother. The now familiar gagging smell, so close.")
        time.sleep(5)
        print("In the near distance, camouflaged among the branches, a shadow becomes clearer.")
        time.sleep(4)
        print("Now you can see some tall disgusting, mummified looking monster running towards you. ")
        time.sleep(5)
        path_2_forf()
    else:
        print("Invalid response")
        path_1_mother_half()
def path_1_mother():
    """Walk toward the mother's voice, then branch on the chosen path."""
    time.sleep(3)
    print("You continue to walk.")
    time.sleep(3)
    print("'That is definitely my mother. Why is she upset?'")
    time.sleep(3)
    choice = input("There are two paths but you hear her down both. Pick a path: A) Path one B) Path two ")
    if choice in ("A", "a"):
        # Safe ending: it was only Jay in the cabin.
        for line, pause in (
                ("You come to a dark creepy cabin and decide to venture in.", 3),
                ("As you slowly open the door, it creaks. What you see shocks and disgusts you.", 5.5),
                ("'IT'S JAY!' you exclaim! He's sitting there in the cabin playing scary games on his PS4. Phew!!! You decide to stay and play games.", 3)):
            print(line)
            time.sleep(pause)
        print("Game End.")
    elif choice in ("B", "b"):
        # The crying was bait; the monster attacks.
        for line, pause in (
                ("The crying is getting louder, almost deafening, as your eyes dart around hoping to find your mother. The now familiar gagging smell, so close.", 5),
                ("In the near distance, camouflaged among the branches, a shadow becomes clearer.", 4),
                ("Now you can see some tall disgusting, mummified looking monster running towards you. ", 5)):
            print(line)
            time.sleep(pause)
        path_2_forf()
    else:
        print("Invalid response")
        path_1_mother_half()
def level_three_path_half():
    """Re-ask the left/right path choice after an invalid answer."""
    choice = input("Pick a path: A) Left B) Right ")
    if choice in ("A", "a"):
        for line, pause in (("This looks like the correct path…", 3),
                            ("You hear a voice in the distance, a familiar voice...", 3),
                            ("'Is that my mother??' ", 2)):
            print(line)
            time.sleep(pause)
        path_1_mother()
    elif choice in ("B", "b"):
        print("You go right and stumble upon a broken bag.")
        time.sleep(2)
        path_2_bag()
    else:
        print("Invalid response.")
        level_three_path_half()
def level_three():
    """Present the two forest paths once the torch is lit."""
    time.sleep(1)
    print("The stench in the damp air seems to get heavier. 'What's that smell???' ")
    time.sleep(3.5)
    choice = input("You look down the paths, they look exactly the same. Pick a path: A) Left B) Right ")
    if choice in ("A", "a"):
        for line, pause in (("This looks like the correct path…", 3),
                            ("You hear a voice in the distance, a familiar voice...", 4),
                            ("'Is that my mother??' ", 2)):
            print(line)
            time.sleep(pause)
        path_1_mother()
    elif choice in ("B", "b"):
        print("You go right and stumble upon a broken bag. ")
        time.sleep(2)
        path_2_bag()
    else:
        print("Invalid response.")
        level_three_path_half()
def stuck_in_forest():
    """Force a decision about the torch before the forest paths open up."""
    choice = input("Make up your mind! Do you want to A) Pick up torch? B) Leave it and go back? ")
    if choice in ("A", "a"):
        print("You pick up the torch and turn it on. Now you can see two dirt paths clearly.")
        time.sleep(3)
        level_three()
    elif choice in ("B", "b"):
        print("Leaving so soon?")
        time.sleep(2)
        level_one()
    else:
        # Note: in this branch the pause deliberately precedes the message.
        time.sleep(2)
        print("Invalid response.")
        stuck_in_forest()
def level_two():
time.sleep(2.5)
print("You stumble on, going deeper into the forest, so aware of how dark and creepy it feels, | |
<gh_stars>1-10
"""
Unit tests for ProgramEnrollment views.
"""
import json
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from unittest import mock
from uuid import UUID, uuid4
import ddt
from django.conf import settings
from django.core.cache import cache
from django.test import override_settings
from django.urls import reverse
from django.utils import timezone
from freezegun import freeze_time
from opaque_keys.edx.keys import CourseKey
from organizations.tests.factories import OrganizationFactory as LMSOrganizationFactory
from rest_framework import status
from rest_framework.test import APITestCase
from social_django.models import UserSocialAuth
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseStaffRole
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from common.djangoapps.student.tests.factories import GlobalStaffFactory
from common.djangoapps.student.tests.factories import InstructorFactory
from common.djangoapps.third_party_auth.tests.factories import SAMLProviderConfigFactory
from lms.djangoapps.bulk_email.models import BulkEmailFlag, Optout
from lms.djangoapps.certificates.data import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from lms.djangoapps.grades.api import CourseGradeFactory
from lms.djangoapps.program_enrollments.constants import ProgramCourseOperationStatuses as CourseStatuses
from lms.djangoapps.program_enrollments.constants import ProgramOperationStatuses as ProgramStatuses
from lms.djangoapps.program_enrollments.exceptions import ProviderDoesNotExistException
from lms.djangoapps.program_enrollments.models import ProgramCourseEnrollment, ProgramEnrollment
from lms.djangoapps.program_enrollments.tests.factories import (
CourseAccessRoleAssignmentFactory,
ProgramCourseEnrollmentFactory,
ProgramEnrollmentFactory
)
from openedx.core.djangoapps.catalog.cache import PROGRAM_CACHE_KEY_TPL, PROGRAMS_BY_ORGANIZATION_CACHE_KEY_TPL
from openedx.core.djangoapps.catalog.tests.factories import (
CourseFactory,
CourseRunFactory,
CurriculumFactory,
OrganizationFactory,
ProgramFactory
)
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from openedx.core.djangolib.testing.utils import CacheIsolationMixin
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModulestoreCourseFactory
from xmodule.modulestore.tests.factories import ItemFactory
from .. import views
from ..constants import (
ENABLE_ENROLLMENT_RESET_FLAG,
MAX_ENROLLMENT_RECORDS,
REQUEST_STUDENT_KEY,
CourseRunProgressStatuses
)
# Dotted-path format strings for building mock.patch targets inside the
# program_enrollments app at increasing depth.
_DJANGOAPP_PATCH_FORMAT = 'lms.djangoapps.program_enrollments.{}'
_REST_API_PATCH_FORMAT = _DJANGOAPP_PATCH_FORMAT.format('rest_api.v1.{}')
_VIEW_PATCH_FORMAT = _REST_API_PATCH_FORMAT.format('views.{}')
_UTILS_PATCH_FORMAT = _REST_API_PATCH_FORMAT.format('utils.{}')
_get_users_patch_path = _DJANGOAPP_PATCH_FORMAT.format('api.writing.get_users_by_external_keys')
# Reusable patch: get_users_by_external_keys resolves every external key to
# None (the defaultdict supplies None for any key looked up).
_patch_get_users = mock.patch(
    _get_users_patch_path,
    autospec=True,
    return_value=defaultdict(lambda: None),
)
class ProgramCacheMixin(CacheIsolationMixin):
    """
    Mixin for using program cache in tests
    """
    ENABLED_CACHES = ['default']  # consumed by CacheIsolationMixin -- confirm
    def set_program_in_catalog_cache(self, program_uuid, program):
        """Cache *program* under its catalog cache key with no expiration."""
        cache.set(PROGRAM_CACHE_KEY_TPL.format(uuid=program_uuid), program, None)
    def set_org_in_catalog_cache(self, organization, program_uuids):
        """Cache the program UUIDs for *organization*, keyed by its short_name."""
        cache.set(PROGRAMS_BY_ORGANIZATION_CACHE_KEY_TPL.format(org_key=organization.short_name), program_uuids)
class EnrollmentsDataMixin(ProgramCacheMixin):
"""
Mixin to define some shared test data objects for program/course enrollment
view tests.
"""
view_name = 'SET-ME-IN-SUBCLASS'
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.start_cache_isolation()
cls.organization_key = "testorg"
cls.catalog_org = OrganizationFactory(key=cls.organization_key)
cls.lms_org = LMSOrganizationFactory(short_name=cls.organization_key)
cls.program_uuid = UUID('00000000-1111-2222-3333-444444444444')
cls.program_uuid_tmpl = '00000000-1111-2222-3333-4444444444{0:02d}'
cls.curriculum_uuid = UUID('aaaaaaaa-1111-2222-3333-444444444444')
cls.other_curriculum_uuid = UUID('bbbbbbbb-1111-2222-3333-444444444444')
inactive_curriculum_uuid = UUID('cccccccc-1111-2222-3333-444444444444')
catalog_course_id_str = 'course-v1:edX+ToyX'
course_run_id_str = f'{catalog_course_id_str}+Toy_Course'
cls.course_id = CourseKey.from_string(course_run_id_str)
CourseOverviewFactory(id=cls.course_id)
course_run = CourseRunFactory(key=course_run_id_str)
cls.course = CourseFactory(key=catalog_course_id_str, course_runs=[course_run])
inactive_curriculum = CurriculumFactory(uuid=inactive_curriculum_uuid, is_active=False)
cls.curriculum = CurriculumFactory(uuid=cls.curriculum_uuid, courses=[cls.course])
cls.program = ProgramFactory(
uuid=cls.program_uuid,
authoring_organizations=[cls.catalog_org],
curricula=[inactive_curriculum, cls.curriculum],
)
cls.course_not_in_program = CourseFactory()
cls.course_not_in_program_id = CourseKey.from_string(
cls.course_not_in_program["course_runs"][0]["key"]
)
cls.password = 'password'
cls.student = UserFactory(username='student', password=<PASSWORD>)
cls.global_staff = GlobalStaffFactory(username='global-staff', password=<PASSWORD>)
def setUp(self):
super().setUp()
self.set_program_in_catalog_cache(self.program_uuid, self.program)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.end_cache_isolation()
def get_url(self, program_uuid=None, course_id=None):
""" Returns the primary URL requested by the test case. """
kwargs = {'program_uuid': program_uuid or self.program_uuid}
if course_id:
kwargs['course_id'] = course_id
return reverse(self.view_name, kwargs=kwargs)
def log_in_non_staff(self):
self.client.login(username=self.student.username, password=self.password)
def log_in_staff(self):
self.client.login(username=self.global_staff.username, password=self.password)
def learner_enrollment(self, student_key, enrollment_status="active", course_staff=None):
"""
Convenience method to create a learner enrollment record
"""
enrollment_record = {"student_key": student_key, "status": enrollment_status}
if course_staff is not None:
enrollment_record["course_staff"] = course_staff
return enrollment_record
def create_program_enrollment(self, external_user_key, user=False):
"""
Creates and returns a ProgramEnrollment for the given external_user_key and
user if specified.
"""
program_enrollment = ProgramEnrollmentFactory.create(
external_user_key=external_user_key,
program_uuid=self.program_uuid,
)
if user is not False:
program_enrollment.user = user
program_enrollment.save()
return program_enrollment
def create_program_course_enrollment(self, program_enrollment, course_status='active'):
"""
Creates and returns a ProgramCourseEnrollment for the given program_enrollment and
self.course_key, creating a CourseEnrollment if the program enrollment has a user
"""
course_enrollment = None
if program_enrollment.user:
course_enrollment = CourseEnrollmentFactory.create(
course_id=self.course_id,
user=program_enrollment.user,
mode=CourseMode.MASTERS
)
course_enrollment.is_active = course_status == "active"
course_enrollment.save()
return ProgramCourseEnrollmentFactory.create(
program_enrollment=program_enrollment,
course_key=self.course_id,
course_enrollment=course_enrollment,
status=course_status,
)
    def create_program_and_course_enrollments(self, external_user_key, user=False, course_status='active'):
        # Convenience wrapper: program enrollment plus course enrollment in one call.
        program_enrollment = self.create_program_enrollment(external_user_key, user)
        return self.create_program_course_enrollment(program_enrollment, course_status=course_status)
class ProgramEnrollmentsGetTests(EnrollmentsDataMixin, APITestCase):
    """
    Tests for GET calls to the Program Enrollments API.
    """
    view_name = 'programs_api:v1:program_enrollments'
    def create_program_enrollments(self):
        """
        Helper method for creating program enrollment records.

        Creates two pending enrollments with no linked user (user-0/1) and two
        enrollments linked to real accounts (user-2/3); cleanup is registered
        automatically.
        """
        for i in range(2):
            user_key = f'user-{i}'
            ProgramEnrollmentFactory.create(
                program_uuid=self.program_uuid,
                curriculum_uuid=self.curriculum_uuid,
                user=None,
                status='pending',
                external_user_key=user_key,
            )
        # These two rely on the factory's default status, which the
        # assertions below expect to be 'enrolled'.
        for i in range(2, 4):
            user_key = f'user-{i}'
            ProgramEnrollmentFactory.create(
                program_uuid=self.program_uuid,
                curriculum_uuid=self.curriculum_uuid,
                user=UserFactory.create(username=f'student-{i}', email=f'email-{i}'),
                external_user_key=user_key,
            )
        self.addCleanup(self.destroy_program_enrollments)
    def destroy_program_enrollments(self):
        """
        Deletes program enrollments associated with this test case's program_uuid.
        """
        ProgramEnrollment.objects.filter(program_uuid=self.program_uuid).delete()
    def test_404_if_no_program_with_key(self):
        # A well-formed but unknown program UUID must 404.
        self.client.login(username=self.global_staff.username, password=self.password)
        fake_program_uuid = UUID(self.program_uuid_tmpl.format(88))
        response = self.client.get(self.get_url(fake_program_uuid))
        assert status.HTTP_404_NOT_FOUND == response.status_code
    def test_403_if_not_staff(self):
        # Non-staff users are forbidden from listing enrollments.
        self.client.login(username=self.student.username, password=self.password)
        response = self.client.get(self.get_url())
        assert status.HTTP_403_FORBIDDEN == response.status_code
    def test_401_if_anonymous(self):
        # Unauthenticated requests are rejected outright.
        response = self.client.get(self.get_url())
        assert status.HTTP_401_UNAUTHORIZED == response.status_code
    def test_200_empty_results(self):
        # No enrollments: an empty, unpaginated result set.
        self.client.login(username=self.global_staff.username, password=self.password)
        response = self.client.get(self.get_url())
        assert status.HTTP_200_OK == response.status_code
        expected = {
            'next': None,
            'previous': None,
            'results': [],
        }
        assert expected == response.data
    def test_200_many_results(self):
        # All four fixture enrollments fit in a single page.
        self.client.login(username=self.global_staff.username, password=self.password)
        self.create_program_enrollments()
        response = self.client.get(self.get_url())
        assert status.HTTP_200_OK == response.status_code
        expected = {
            'next': None,
            'previous': None,
            'results': [
                {
                    'student_key': 'user-0', 'status': 'pending', 'account_exists': False,
                    'curriculum_uuid': str(self.curriculum_uuid), 'username': "", 'email': ""
                },
                {
                    'student_key': 'user-1', 'status': 'pending', 'account_exists': False,
                    'curriculum_uuid': str(self.curriculum_uuid), 'username': "", 'email': ""
                },
                {
                    'student_key': 'user-2', 'status': 'enrolled', 'account_exists': True,
                    'curriculum_uuid': str(self.curriculum_uuid), 'username': "student-2", 'email': "email-2"
                },
                {
                    'student_key': 'user-3', 'status': 'enrolled', 'account_exists': True,
                    'curriculum_uuid': str(self.curriculum_uuid), 'username': "student-3", 'email': "email-3"
                },
            ],
        }
        assert expected == response.data
    def test_200_many_pages(self):
        # With page_size=2, results are split over two cursor-paginated pages.
        self.client.login(username=self.global_staff.username, password=self.password)
        self.create_program_enrollments()
        url = self.get_url() + '?page_size=2'
        response = self.client.get(url)
        assert status.HTTP_200_OK == response.status_code
        expected_results = [
            {
                'student_key': 'user-0', 'status': 'pending', 'account_exists': False,
                'curriculum_uuid': str(self.curriculum_uuid), 'username': "", 'email': ""
            },
            {
                'student_key': 'user-1', 'status': 'pending', 'account_exists': False,
                'curriculum_uuid': str(self.curriculum_uuid), 'username': "", 'email': ""
            },
        ]
        assert expected_results == response.data['results']
        # there's going to be a 'cursor' query param, but we have no way of knowing it's value
        assert response.data['next'] is not None
        assert self.get_url() in response.data['next']
        assert '?cursor=' in response.data['next']
        assert response.data['previous'] is None
        # Follow the opaque cursor link to the second page.
        next_response = self.client.get(response.data['next'])
        assert status.HTTP_200_OK == next_response.status_code
        next_expected_results = [
            {
                'student_key': 'user-2', 'status': 'enrolled', 'account_exists': True,
                'curriculum_uuid': str(self.curriculum_uuid), 'username': "student-2", 'email': "email-2"
            },
            {
                'student_key': 'user-3', 'status': 'enrolled', 'account_exists': True,
                'curriculum_uuid': str(self.curriculum_uuid), 'username': "student-3", 'email': "email-3"
            },
        ]
        assert next_expected_results == next_response.data['results']
        assert next_response.data['next'] is None
        # there's going to be a 'cursor' query param, but we have no way of knowing it's value
        assert next_response.data['previous'] is not None
        assert self.get_url() in next_response.data['previous']
        assert '?cursor=' in next_response.data['previous']
@ddt.ddt
class ProgramEnrollmentsWriteMixin(EnrollmentsDataMixin):
    """ Mixin class that defines common tests for program enrollment write endpoints """
    # Subclasses targeting POST set add_uuid = True so each request record
    # carries its own curriculum_uuid.
    add_uuid = False
    view_name = 'programs_api:v1:program_enrollments'
    def student_enrollment(self, enrollment_status, external_user_key=None, prepare_student=False):
        """ Convenience method to create a student enrollment record """
        enrollment = {
            REQUEST_STUDENT_KEY: external_user_key or str(uuid4().hex[0:10]),
            'status': enrollment_status,
        }
        if self.add_uuid:
            enrollment['curriculum_uuid'] = str(uuid4())
        if prepare_student:
            self.prepare_student(enrollment[REQUEST_STUDENT_KEY])
        return enrollment
    def prepare_student(self, key):
        # Hook for subclasses: pre-create state for the given student key.
        pass
    def test_unauthenticated(self):
        # Writes without a session must be rejected with 401.
        self.client.logout()
        request_data = [self.student_enrollment('enrolled')]
        response = self.request(self.get_url(), json.dumps(request_data), content_type='application/json')
        assert response.status_code == status.HTTP_401_UNAUTHORIZED
    def test_enrollment_payload_limit(self):
        # One record over the limit triggers 413.
        request_data = [self.student_enrollment('enrolled') for _ in range(MAX_ENROLLMENT_RECORDS + 1)]
        response = self.request(self.get_url(), json.dumps(request_data), content_type='application/json')
        assert response.status_code == status.HTTP_413_REQUEST_ENTITY_TOO_LARGE
    def test_duplicate_enrollment(self):
        # The same student key twice in one payload is a 422 with a per-key error.
        request_data = [
            self.student_enrollment('enrolled', '001'),
            self.student_enrollment('enrolled', '001'),
        ]
        response = self.request(self.get_url(), json.dumps(request_data), content_type='application/json')
        assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
        assert response.data == {'001': 'duplicated'}
    def test_unprocessable_enrollment(self):
        # A record with no student key at all is a 400.
        response = self.request(
            self.get_url(),
            json.dumps([{'status': 'enrolled'}]),
            content_type='application/json'
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_program_unauthorized(self):
        # An ordinary user (no staff/program permissions) gets 403.
        student = UserFactory.create(password='password')
        self.client.login(username=student.username, password='password')
        request_data = [self.student_enrollment('enrolled')]
        response = self.request(self.get_url(), json.dumps(request_data), content_type='application/json')
        assert response.status_code == status.HTTP_403_FORBIDDEN
    def test_program_not_found(self):
        # Writes against an unknown program UUID are a 404.
        post_data = [self.student_enrollment('enrolled')]
        nonexistant_uuid = uuid4()
        response = self.request(
            self.get_url(program_uuid=nonexistant_uuid),
            json.dumps(post_data),
            content_type='application/json'
        )
        assert response.status_code == status.HTTP_404_NOT_FOUND
    @ddt.data(
        [{'status': 'pending'}],
        [{'status': 'not-a-status'}],
        [{'status': 'pending'}, {'status': 'pending'}],
    )
    def test_no_student_key(self, bad_records):
        # Any record missing the student key invalidates the whole request.
        url = self.get_url()
        enrollments = [self.student_enrollment('enrolled', '001', True)]
        enrollments.extend(bad_records)
        response = self.request(url, json.dumps(enrollments), content_type='application/json')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_extra_field(self):
        # Unknown fields are ignored; the record is still processed.
        self.student_enrollment('pending', 'learner-01', prepare_student=True)
        enrollment = self.student_enrollment('enrolled', 'learner-01')
        enrollment['favorite_pokemon'] = 'bulbasaur'
        enrollments = [enrollment]
        with _patch_get_users:
            url = self.get_url()
            response = self.request(url, json.dumps(enrollments), content_type='application/json')
        assert 200 == response.status_code
        self.assertDictEqual(
            response.data,
            {'learner-01': 'enrolled'}
        )
@ddt.ddt
class ProgramEnrollmentsPostTests(ProgramEnrollmentsWriteMixin, APITestCase):
"""
Tests for the ProgramEnrollment view POST method.
"""
add_uuid = True
    def setUp(self):
        """Per-test setup: bind the write helper to POST and log in as staff."""
        super().setUp()
        self.request = self.client.post
        self.client.login(username=self.global_staff.username, password='password')
    def tearDown(self):
        """Per-test teardown: remove all enrollments created by the test."""
        super().tearDown()
        ProgramEnrollment.objects.all().delete()
def test_successful_program_enrollments_no_existing_user(self):
statuses = ['pending', 'enrolled', 'pending', 'ended']
external_user_keys = ['abc1', 'efg2', '<KEY>']
curriculum_uuids = [self.curriculum_uuid, self.curriculum_uuid, uuid4(), uuid4()]
post_data = [
{
REQUEST_STUDENT_KEY: e,
'status': s,
'curriculum_uuid': str(c)
}
for e, s, c in zip(external_user_keys, statuses, curriculum_uuids)
]
url = self.get_url(program_uuid=0)
with _patch_get_users:
response = self.client.post(url, json.dumps(post_data), content_type='application/json')
assert response.status_code == 200
for i in range(4):
enrollment = ProgramEnrollment.objects.get(external_user_key=external_user_keys[i])
assert enrollment.external_user_key == external_user_keys[i]
assert enrollment.program_uuid == self.program_uuid
assert enrollment.status == statuses[i]
assert enrollment.curriculum_uuid == curriculum_uuids[i]
assert enrollment.user is None
def test_successful_program_enrollments_existing_user(self):
post_data = [
{
'status': 'enrolled',
REQUEST_STUDENT_KEY: 'abc1',
'curriculum_uuid': str(self.curriculum_uuid)
}
]
user = UserFactory.create(username='test_user', email='<EMAIL>', password='password')
url = self.get_url()
with mock.patch(
_get_users_patch_path,
autospec=True,
return_value={'abc1': user},
):
response = self.client.post(
url, json.dumps(post_data), content_type='application/json'
)
assert response.status_code == 200
enrollment = ProgramEnrollment.objects.get(external_user_key='abc1')
assert enrollment.external_user_key == 'abc1'
assert enrollment.program_uuid == self.program_uuid
assert enrollment.status == 'enrolled'
assert enrollment.curriculum_uuid == self.curriculum_uuid
assert enrollment.user == user
def test_program_enrollments_no_idp(self):
post_data = [
{
'status': 'enrolled',
REQUEST_STUDENT_KEY: f'abc{i}',
'curriculum_uuid': str(self.curriculum_uuid)
} for i in range(3)
]
url = | |
# Source repository: apyrgio/ganeti
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units dealing with instances."""
import logging
import os
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
from ganeti.cmdlib.common import \
INSTANCE_NOT_RUNNING, CheckNodeOnline, \
ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
LoadNodeEvacResult, \
ExpandInstanceUuidAndName, \
CheckInstanceState, ExpandNodeUuidAndName, \
CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
ComputeDisks, \
StartInstanceDisks, ShutdownInstanceDisks, \
AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import \
BuildInstanceHookEnvByObject,\
CheckNodeNotDrained, RemoveInstance, CopyLockList, \
CheckNodeVmCapable, CheckTargetNodeIPolicy, \
GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
CheckInstanceBridgesExist, \
CheckInstanceExistence, \
CheckHostnameSane, CheckOpportunisticLocking, ComputeFullBeParams, \
ComputeNics, CreateInstanceAllocRequest
import ganeti.masterd.instance
class LUInstanceRename(LogicalUnit):
  """Rename an instance.
  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def CheckArguments(self):
    """Check arguments.
    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)
    # Guard so _PerformChecksAndResolveNewName() runs at most once even
    # though both ExpandNames() and CheckPrereq() may invoke it.
    self._new_name_resolved = False
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and secondary nodes of the instance.
    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)
  def _PerformChecksAndResolveNewName(self):
    """Checks and resolves the new name, storing the FQDN, if permitted.
    """
    # No-op when already resolved or when name checking is disabled.
    if self._new_name_resolved or not self.op.name_check:
      return
    hostname = CheckHostnameSane(self, self.op.new_name)
    self.op.new_name = hostname.name
    # Optionally ensure the resolved IP is not already answering on the
    # noded port (i.e. the name is not in use by a live host).
    if (self.op.ip_check and
        netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                 (hostname.ip, self.op.new_name),
                                 errors.ECODE_NOTUNIQUE)
    self._new_name_resolved = True
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster and is not running.
    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None
    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    for disk in self.cfg.GetInstanceDisks(instance.uuid):
      if (disk.dev_type in constants.DTS_FILEBASED and
          self.op.new_name != instance.name):
        # TODO: when disks are separate objects, this should check for disk
        # types, not disk templates.
        CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), disk.dev_type)
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance
    self._PerformChecksAndResolveNewName()
    # Reject a rename onto an existing instance name (no-op renames allowed).
    if self.op.new_name != instance.name:
      CheckInstanceExistence(self, self.op.new_name)
  def ExpandNames(self):
    self._ExpandAndLockInstance(allow_forthcoming=True)
    # Note that this call might not resolve anything if name checks have been
    # disabled in the opcode. In this case, we might have a renaming collision
    # if a shortened name and a full name are used simultaneously, as we will
    # have two different locks. However, at that point the user has taken away
    # the tools necessary to detect this issue.
    self._PerformChecksAndResolveNewName()
    # Used to prevent instance namespace collisions.
    if self.op.new_name != self.op.instance_name:
      CheckInstanceExistence(self, self.op.new_name)
      self.add_locks[locking.LEVEL_INSTANCE] = self.op.new_name
  def Exec(self, feedback_fn):
    """Rename the instance.
    """
    old_name = self.instance.name
    rename_file_storage = False
    # File-based disks (except Gluster) live in a directory named after the
    # instance, so that directory must be renamed along with the instance.
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    renamed_storage = [d for d in disks
                       if (d.dev_type in constants.DTS_FILEBASED and
                           d.dev_type != constants.DT_GLUSTER)]
    if (renamed_storage and self.op.new_name != self.instance.name):
      disks = self.cfg.GetInstanceDisks(self.instance.uuid)
      old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
      rename_file_storage = True
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Assert that we have both the locks needed
    assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
    assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)
    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
    disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)
    # Forthcoming (not yet created) instances need only the config change.
    if self.instance.forthcoming:
      return renamed_inst.name
    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))
    StartInstanceDisks(self, renamed_inst, None)
    renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(disks):
      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      # Disks were started only for the rename script; always shut them down.
      ShutdownInstanceDisks(self, renamed_inst)
    return renamed_inst.name
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.
  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    self._ExpandAndLockInstance(allow_forthcoming=True)
    # Node locks are computed later in DeclareLocks from the instance's nodes.
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and secondary nodes of the instance.
    """
    env = BuildInstanceHookEnvByObject(self, self.instance,
                                       secondary_nodes=self.secondary_nodes,
                                       disks=self.inst_disks)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    nl = [self.cfg.GetMasterNode()]
    # Post-hooks also run on the instance's (former) nodes.
    nl_post = list(self.cfg.GetInstanceNodes(self.instance.uuid)) + nl
    return (nl, nl_post)
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster.
    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.secondary_nodes = \
      self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
  def Exec(self, feedback_fn):
    """Remove the instance.
    """
    # Sanity: node and node-resource lock sets must match and cover all of
    # the instance's nodes before destructive work starts.
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"
    if not self.instance.forthcoming:
      logging.info("Shutting down instance %s on node %s", self.instance.name,
                   self.cfg.GetNodeName(self.instance.primary_node))
      result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                               self.instance,
                                               self.op.shutdown_timeout,
                                               self.op.reason)
      # With ignore_failures a failed shutdown is reported but not fatal.
      if self.op.ignore_failures:
        result.Warn("Warning: can't shutdown instance", feedback_fn)
      else:
        result.Raise("Could not shutdown instance %s on node %s" %
                     (self.instance.name,
                      self.cfg.GetNodeName(self.instance.primary_node)))
    else:
      logging.info("Instance %s on node %s is forthcoming; not shutting down",
                   self.instance.name,
                   self.cfg.GetNodeName(self.instance.primary_node))
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
class LUInstanceMove(LogicalUnit):
"""Move an instance by data-copying.
This LU is only used if the instance needs to be moved by copying the data
from one node in the cluster to another. The instance is shut down and
the data is copied to the new node and the configuration change is propagated,
then the instance is started again.
See also:
L{LUInstanceFailover} for moving an instance on shared storage (no copying
required).
L{LUInstanceMigrate} for the live migration of an instance (no shutdown
required).
"""
HPATH = "instance-move"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
  def ExpandNames(self):
    # Lock the instance and resolve/lock the move target node; source node
    # locks are added later by DeclareLocks (LOCKS_APPEND keeps the target).
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # Only the primary node is needed besides the already-locked target.
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and target nodes of the instance.
    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    # Merge in the standard per-instance hook environment.
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    # Hooks run on master, the current primary, and the move target.
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for idx, dsk in enumerate(disks):
if dsk.dev_type not in constants.DTS_COPYABLE:
raise errors.OpPrereqError("Instance disk %d has disk type %s and is"
" not suitable for copying"
% (idx, dsk.dev_type), errors.ECODE_STATE)
target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
| |
self.__demangled_name
    def set_demangled_name(self, value):
        # Setter for the demangled (human-readable) class name.
        self.__demangled_name = value
        return
    def get_name(self):
        # Raw (mangled) class name.
        return self.__name
    def set_name(self, value):
        self.__name = value
        return
    def add_member(self, member, off):
        '''
        Add the member, ensuring that the required parameters are specified
        '''
        # Back-reference the owning class, then index the member by offset.
        member.cls = self
        self.__members[off] = member
        return
    def add_method(self, method):
        # Back-reference the owning class; methods are kept as a list.
        method.cls = self
        self.__methods.append(method)
        return
    def add_vftable(self, vft, vfptr):
        # Index virtual function tables by their vfptr offset.
        self.__vftables[vfptr] = vft
        return
    def add_parent(self, parent, off):
        # Index parent classes by their offset within this class.
        self.__parents[off] = parent
        return
    def add_usage(self, usage):
        # Record a virtual-function call usage for this class.
        self.__vfcalls.append(usage)
        return
def has_members(self):
return not self.__members
def has_methods(self):
return not self.__methods
def has_vftable(self):
return not self.__vftables
    def get_members(self):
        # Offset -> member mapping.
        return self.__members
    def get_methods(self):
        return self.__methods
    def get_vftables(self):
        # vfptr offset -> vftable mapping.
        return self.__vftables
    def set_vftables(self, value):
        self.__vftables = value
    def get_parents(self):
        # Offset -> parent class mapping.
        return self.__parents
    def set_id(self, value):
        # IDA structure id backing this class.
        self.__id = value
        return
    def get_id(self):
        return self.__id
    def set_applied(self, value):
        # Whether this class has been applied to the IDB.
        self.__applied = value
        return
    def get_applied(self):
        return self.__applied
    def get_size(self):
        return self.__size
    def set_size(self,value):
        self.__size = value
        return
    def get_usages(self):
        # Alias of vfcalls: recorded virtual-function call usages.
        return self.__vfcalls
    def set_usages(self,value):
        self.__vfcalls = value
    def get_vfcalls(self):
        return self.__vfcalls
    def set_vfcalls(self,value):
        self.__vfcalls = value
# Properties of class structures
demangled_name = property(get_demangled_name, set_demangled_name, None, None)
ida_name = property(get_ida_name, set_ida_name, None, None)
name = property(get_name, set_name, None, None)
members = property(get_members, None, None, None)
methods = property(get_methods, None, None, None)
vftables = property(get_vftables, set_vftables, None, None)
parents = property(get_parents, None, None, None)
id = property(get_id, set_id, None, None)
size = property(get_size,set_size, None, None)
applied = property(get_applied, set_applied, None, None)
usages = property(get_usages, set_usages, None, None)
vfcalls = property(get_vfcalls, set_vfcalls, None, None)
# =============================================================================
# End class
class PyOOAnalyzerMethodTreeItem(QTreeWidgetItem):
    '''
    This is a method element in the class viewer tree
    '''
    def __init__(self,parent, meth):
        super(PyOOAnalyzerMethodTreeItem, self).__init__( parent )
        # Keep the model object and show its name in column 0.
        self.method = meth
        self.setText(0, self.method.method_name)
class PyOOAnalyzerMemberTreeItem(QTreeWidgetItem):
    '''
    This is a member element in the class viewer tree
    '''
    def __init__(self, parent, mem=None):
        super(PyOOAnalyzerMemberTreeItem, self).__init__( parent )
        # Keep the model object and show its name in column 0.
        self.member = mem
        self.setText(0, self.member.member_name)
class PyOOAnalyzerStructTreeItem(QTreeWidgetItem):
    '''
    This is a class element in the class viewer tree
    '''
    def __init__(self, parent, cls=None):
        super(PyOOAnalyzerStructTreeItem, self).__init__( parent )
        # Keep the model object and show its IDA name in column 0.
        self.class_struct = cls
        self.setText(0, self.class_struct.ida_name)
class PyOOAnalyzerExpForm(idaapi.PluginForm):
'''
This is the main class viewer form.
'''
    def __init__(self):
        super(PyOOAnalyzerExpForm, self).__init__()
        # OOAnalyzer results model; set before the form is shown.
        self.__objdigger = None
        # Qt tree widget built when the form UI is created.
        self.cls_tree = None
        # IDB/IDP hook objects (installed lazily by start_ida_hooks).
        self.idbhook = None
        self.idphook = None
        # the name of the IDA netnode that contains class information
        self.__NODE_NAME = "$OBJD"
        self.__visible = False
        return
def start_ida_hooks(self):
if self.idbhook == None:
self.idbhook = PyOOAnalyzerIDBHooks(self.__objdigger, self)
self.idbhook.hook()
if self.idphook == None:
self.idphook = PyOOAnalyzerIDPHooks(self.__objdigger, self)
self.idphook.hook()
def remove_ida_hooks(self):
if self.idbhook != None:
self.idbhook.unhook()
self.idbhook = None
if self.idphook != None:
self.idphook.unhook()
self.idphook = None
    def populate_class_list(self):
        '''
        Initialize the class viewer
        '''
        # Build one top-level item per class, with fixed child groups:
        # index 0 = "Methods", index 1 = "Members", index 2 = "Parents"
        # (the last only when the class has parents).
        cls_list = self.__objdigger.get_classes()
        for cls in cls_list:
            cls_entry = PyOOAnalyzerStructTreeItem(self.cls_tree, cls)
            cls_methods = QTreeWidgetItem(cls_entry)
            cls_methods.setText(0,"Methods")
            for m in cls.methods:
                PyOOAnalyzerMethodTreeItem(cls_methods, m)
            cls_members = QTreeWidgetItem(cls_entry)
            cls_members.setText(0,"Members")
            # NOTE: .iteritems() — this module is Python 2 code.
            for off, mbr in cls.members.iteritems():
                # Parents are modeled as members too; skip them here.
                if mbr.is_parent == False:
                    PyOOAnalyzerMemberTreeItem(cls_members, mbr)
            if len(cls.parents) > 0:
                cls_parents = QTreeWidgetItem(cls_entry)
                cls_parents.setText(0,"Parents")
                for o, p in cls.parents.iteritems():
                    # Parents are really special members that are class types
                    PyOOAnalyzerStructTreeItem(cls_parents, p)
        return
    def navigate(self, event):
        '''
        Enable 1-click navigation to class methods
        '''
        # Block signals so programmatic selection doesn't re-trigger handlers.
        self.cls_tree.blockSignals(True)
        item = self.cls_tree.currentItem()
        if type(item) is PyOOAnalyzerMethodTreeItem:
            # Jump the IDA view to the method's start address.
            idaapi.jumpto(item.method.start_ea)
        self.cls_tree.blockSignals(False)
        return
    def update_class_method(self, cid, old_name, new_name):
        '''
        Update the class viewer in response to a method name change elsewhere in
        the IDB
        '''
        self.cls_tree.blockSignals(True)
        # Walk every visible tree item until the class with matching id is found.
        iterator = QTreeWidgetItemIterator(self.cls_tree,
                                           QTreeWidgetItemIterator.NotHidden)
        item = iterator.value()
        terminate = False
        while item and not terminate:
            if type(item) is PyOOAnalyzerStructTreeItem:
                if item.class_struct.id == cid:
                    # Have to check for because parents are stuct items,
                    # thus we may be in a parent
                    if item.childCount() > 0:
                        meth_group = item.child(0) # should be methods
                        if meth_group != None:
                            if meth_group.text(0) == "Methods":
                                i=0
                                # Linear scan of the "Methods" group for the old name.
                                while i<meth_group.childCount():
                                    meth_item = meth_group.child(i)
                                    if meth_item:
                                        if type(meth_item) is PyOOAnalyzerMethodTreeItem:
                                            if meth_item.text(0) == old_name:
                                                meth_item.setText(0,new_name)
                                                terminate = True
                                                break
                                    i += 1
            iterator += 1
            item = iterator.value()
        self.cls_tree.blockSignals(False)
    def update_class_member(self, cid, old_name, new_name):
        '''
        Update the class viewer in response to a member name change elsewhere in
        the IDB
        '''
        self.cls_tree.blockSignals(True)
        # Walk every visible tree item until the class with matching id is found.
        iterator = QTreeWidgetItemIterator(self.cls_tree,
                                           QTreeWidgetItemIterator.NotHidden)
        item = iterator.value()
        terminate = False
        while item and not terminate:
            if type(item) is PyOOAnalyzerStructTreeItem:
                if item.class_struct.id == cid:
                    # Have to check for because parents are stuct items,
                    # thus we may be in a parent
                    if item.childCount() > 0:
                        mbr_group = item.child(1) # should be members
                        if mbr_group != None:
                            if mbr_group.text(0) == "Members":
                                i=0
                                # Linear scan of the "Members" group for the old name.
                                while i< mbr_group.childCount():
                                    mem_item = mbr_group.child(i)
                                    if mem_item:
                                        if type(mem_item) is PyOOAnalyzerMemberTreeItem:
                                            if mem_item.text(0) == old_name:
                                                mem_item.setText(0,new_name)
                                                terminate = True
                                                break
                                    i += 1
            iterator += 1
            item = iterator.value()
        self.cls_tree.blockSignals(False)
def update_class(self, old_name, new_name):
'''
Update the class viewer in response to a class name change elsewhere in
the IDB
'''
self.cls_tree.blockSignals(True)
iterator = QTreeWidgetItemIterator(self.cls_tree,
QTreeWidgetItemIterator.NotHidden)
item = iterator.value()
while item:
if item.text(0) == old_name:
item.setText(0, new_name)
else:
# rename parents of this item
if item.childCount() == 3:
par_group = item.child(3) # should be members
if par_group != None:
if par_group.text(0) == "Parents":
i=0
while i<par_group.childCount():
par_item = mbr_group.child(i)
if par_item:
if par_item.text(0) == old_name:
par_item.setText(0, new_name)
iterator += 1
item = iterator.value()
self.cls_tree.blockSignals(False)
return
def __edit_member_from_class_viewer(self, item):
'''
Edit the class member from the Class Viewer
'''
member = item.member
old_member_name = member.member_name
new_member_name = idc.AskStr(old_member_name, "Enter new member name")
if new_member_name == None:
self.cls_tree.blockSignals(False)
return
# cid = idc.GetStrucIdByName(member.member_name
cls_members = idautils.StructMembers(member.cls.id)
for [off, n, s] in cls_members:
if n == member.member_name:
if idc.SetMemberName(member.cls.id, off, str(new_member_name)) != 0:
item.setText(0,str(new_member_name))
cls_list = self.__objdigger.get_classes()
for c in cls_list:
for moff, mem in c.members.iteritems():
if mem.member_name == old_member_name:
mem.member_name = new_member_name
else:
idc.Warning("Cannot rename member to %s, the name already exists :or is malformed!" % new_member_name)
return
    def __edit_method_from_class_viewer(self,item):
        '''
        Handle method change request initiated from Class Viewer.

        Prompts for a new method name, renames the function in the IDB, then
        synchronizes the in-memory class model: method records, vftable
        entries for virtual functions, and the function comment.
        '''
        method = item.method
        old_method_name = method.method_name
        new_method_name = idc.AskStr(old_method_name, "Enter new method name")
        if new_method_name == None:
            # Dialog cancelled; re-enable the signals blocked by the caller.
            self.cls_tree.blockSignals(False)
            return
        funcs = idautils.Functions()
        for f in funcs:
            fname = idc.GetFunctionName(f)
            if fname != None:
                if fname == old_method_name:
                    # MakeName returns non-zero on success.
                    if idc.MakeName(f, new_method_name) != 0:
                        cls_list = self.__objdigger.get_classes()
                        for c in cls_list:
                            for m in c.methods:
                                if m.method_name == old_method_name or m.method_name == new_method_name:
                                    m.method_name = new_method_name
                                    # Build the function comment, e.g. "Cls::meth".
                                    cmt = m.method_name.replace('_', '::', 1)
                                    # the method is a virtual function
                                    if m.is_virtual == True:
                                        cmt = "virtual %s" % cmt
                                        # rename virtual function in vftable structure
                                        for off, vt in c.vftables.iteritems():
                                            for vfoff,vf in vt.virtual_functions.iteritems():
                                                if vf.start_ea == m.start_ea:
                                                    # Suppress this plugin's own rename hook
                                                    # while the vftable struct is modified.
                                                    global ignore_renamed
                                                    ignore_renamed = True
                                                    # NOTE(review): 4*vfoff assumes 4-byte
                                                    # pointer slots — confirm for 64-bit IDBs.
                                                    idc.SetMemberName(vt.id, 4*vfoff, new_method_name)
                                                    ignore_renamed = False
                                                    vf.method_name = new_method_name
                                                    break
                                    if method.is_ctor == True: cmt += " (constructor)"
                                    if method.is_dtor == True: cmt += " (destructor)"
                                    idc.SetFunctionCmt(method.start_ea, cmt, 1)
                        item.setText(0,str(new_method_name))
                    else:
                        idc.Warning("Cannot rename method to %s, the name already exists!" % new_method_name)
        return
def __edit_class_from_class_viewer(self, item):
'''
Handle class name change request initiated from Class Viewer
'''
old_name = item.class_struct.ida_name
cid = idaapi.get_struc_id(str(old_name))
if (cid is not None) and (cid != 0) and (cid != idc.BADADDR):
new_name = idc.AskStr(old_name, "Enter new class name:")
if new_name == None:
self.cls_tree.blockSignals(False)
return
if idc.SetStrucName(cid, new_name) != 0:
item.setText(0,str(new_name))
cls_list = self.__objdigger.get_classes()
for c in cls_list:
if c.ida_name == old_name:
c.ida_name = new_name
else:
idc.Warning("Cannot rename class to %s, the name already exists!" % new_name)
else:
idc.Warning("Cannot rename class before it is applied")
return
def edit_class_item(self, event):
self.cls_tree.blockSignals(True)
item = self.cls_tree.currentItem()
if type(item) is PyOOAnalyzerMemberTreeItem:
self.__edit_member_from_class_viewer(item)
elif type(item) is PyOOAnalyzerMethodTreeItem:
self.__edit_method_from_class_viewer(item)
elif type(item) is PyOOAnalyzerStructTreeItem or type(item) is PyOOAnalyzerParentTreeItem:
self.__edit_class_from_class_viewer(item)
self.cls_tree.blockSignals(False)
def OnCreate(self, form):
# Get parent widget
if ida_version < 6.9:
self.parent = self.FormToPySideWidget(form)
else:
self.parent = self.FormToPyQtWidget(form);
# Create cls_tree control
self.cls_tree = QTreeWidget()
headerItem = QTreeWidgetItem()
headerItem.setText(0,"Class")
headerItem.setText(1,"State")
self.cls_tree.setHeaderItem(headerItem)
self.cls_tree.setWindowTitle("OOAnalzyer Results")
self.cls_tree.setColumnWidth(0, 200)
self.cls_tree.itemSelectionChanged.connect(lambda : self.navigate( self.cls_tree.currentItem()))
# install the context menu
self.cls_tree.setContextMenuPolicy(Qt.CustomContextMenu)
self.cls_tree.customContextMenuRequested.connect(self.open_menu)
# Create layout
layout = QVBoxLayout()
layout.addWidget(self.cls_tree)
self.populate_class_list()
# Populate PluginForm
self.parent.setLayout(layout)
applied_cls = self.__objdigger.get_applied_classes()
for c in applied_cls:
self.__mark_applied(c)
return
def open_menu(self, position):
menu = QMenu()
renameAction = menu.addAction("Rename")
# can only apply classes
applyAction = None
gotoDefAction = None
item = self.cls_tree.currentItem()
if type(item) is PyOOAnalyzerStructTreeItem:
cls = item.class_struct
if cls.applied == False:
applyAction = menu.addAction("Apply")
gotoDefAction = menu.addAction("Open | |
# Example AMR (Abstract Meaning Representation) graph in PENMAN notation,
# meaning "The boy wants to go."
AMR_SOURCES = [
    "(w / want-01 :ARG0 (b / boy) :ARG1 (g / go-01 :ARG0 b))"
]
# Reference surface realization for each graph in AMR_SOURCES (same index).
AMR_TARGETS = [
    "The boy wants to go."
]
AMR_NOISED = {
"convert-to-triples": {
"src": "order Graph: ( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": "<t> want :ARG0 boy <t> want :ARG1 go <t> go :ARG0 boy"
},
"generate-from-triples": {
"src": "order Graph: <t> want :ARG0 boy <t> want :ARG1 go <t> go :ARG0 boy",
"tgt": "The boy wants to go."
},
"mask-all": {
"src": "denoise Graph: ( want :ARG0 <extra_id_0> boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": "<extra_id_0> ( <extra_id_1>"
},
"mask-all-drop": {
"src": "denoise Graph: ( want :ARG0 boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": " ( "
},
"mask-all-mass": {
"src": "denoise Graph: ( want :ARG0 <extra_id_0> boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"mask-components": {
"src": "denoise Graph: ( want :ARG0 <extra_id_0> boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": "<extra_id_0> ( <extra_id_1>"
},
"mask-components-corrupt": {
"src": "denoise Graph: ( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"mask-components-drop": {
"src": "denoise Graph: ( want :ARG0 boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": " ( "
},
"mask-components-mass": {
"src": "denoise Graph: ( want :ARG0 <extra_id_0> boy ) :ARG1 ( go :ARG0 boy ) )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"mask-nodes": {
"src": "denoise Graph: ( <extra_id_0> :ARG0 ( boy ) :ARG1 ( go :ARG0 <extra_id_1> ) )",
"tgt": "<extra_id_0> want <extra_id_1> boy <extra_id_2>"
},
"mask-nodes-drop": {
"src": "denoise Graph: ( :ARG0 ( boy ) :ARG1 ( go :ARG0 ) )",
"tgt": " want boy "
},
"mask-nodes-mass": {
"src": "denoise Graph: ( <extra_id_0> :ARG0 ( boy ) :ARG1 ( go :ARG0 <extra_id_0> ) )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"mask-surface": {
"src": "denoise Graph: The boy wants <extra_id_0> go.",
"tgt": "<extra_id_0> to <extra_id_1>"
},
"parse-from-triples": {
"src": "order Graph: <t> want :ARG0 boy <t> want :ARG1 go <t> go :ARG0 boy",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"randomize": {
"src": "( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "The boy wants to go."
},
"randomize_convert-to-triples": {
"src": "order Graph: ( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "<t> want :ARG1 go <t> want :ARG0 boy <t> go :ARG0 boy"
},
"randomize_generate-from-triples": {
"src": "order Graph: <t> want :ARG1 go <t> want :ARG0 boy <t> go :ARG0 boy",
"tgt": "The boy wants to go."
},
"randomize_mask-all": {
"src": "denoise Graph: ( go :ARG1-of <extra_id_0> want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "<extra_id_0> ( <extra_id_1>"
},
"randomize_mask-all-drop": {
"src": "denoise Graph: ( go :ARG1-of want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": " ( "
},
"randomize_mask-all-mass": {
"src": "denoise Graph: ( go :ARG1-of <extra_id_0> want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )"
},
"randomize_mask-all-mass-unshuffle": {
"src": "denoise Graph: ( go :ARG1-of <extra_id_0> want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"randomize_mask-components": {
"src": "denoise Graph: ( go :ARG1-of <extra_id_0> want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "<extra_id_0> ( <extra_id_1>"
},
"randomize_mask-components-corrupt": {
"src": "denoise Graph: ( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )"
},
"randomize_mask-components-corrupt-unshuffle": {
"src": "denoise Graph: ( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"randomize_mask-components-drop": {
"src": "denoise Graph: ( go :ARG1-of want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": " ( "
},
"randomize_mask-components-mass": {
"src": "denoise Graph: ( go :ARG1-of <extra_id_0> want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )"
},
"randomize_mask-components-mass-unshuffle": {
"src": "denoise Graph: ( go :ARG1-of <extra_id_0> want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"randomize_mask-nodes": {
"src": "denoise Graph: ( <extra_id_0> :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 <extra_id_1> )",
"tgt": "<extra_id_0> go <extra_id_1> boy <extra_id_2>"
},
"randomize_mask-nodes-drop": {
"src": "denoise Graph: ( :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 )",
"tgt": " go boy "
},
"randomize_mask-nodes-mass": {
"src": "denoise Graph: ( <extra_id_0> :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 <extra_id_0> )",
"tgt": "( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )"
},
"randomize_mask-nodes-mass-unshuffle": {
"src": "denoise Graph: ( <extra_id_0> :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 <extra_id_0> )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"randomize_parse-from-triples": {
"src": "order Graph: <t> want :ARG1 go <t> want :ARG0 boy <t> go :ARG0 boy",
"tgt": "( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )"
},
"randomize_reorder": {
"src": "order Graph: ( go :ARG1-of ( want :ARG0 ( boy ) ) :ARG0 boy )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"reconfigure": {
"src": "( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "The boy wants to go."
},
"reconfigure_convert-to-triples": {
"src": "order Graph: ( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "<t> want :ARG1 go <t> go :ARG0 boy <t> want :ARG0 boy"
},
"reconfigure_generate-from-triples": {
"src": "order Graph: <t> want :ARG1 go <t> go :ARG0 boy <t> want :ARG0 boy",
"tgt": "The boy wants to go."
},
"reconfigure_mask-all": {
"src": "denoise Graph: ( want :ARG1 <extra_id_0> go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "<extra_id_0> ( <extra_id_1>"
},
"reconfigure_mask-all-drop": {
"src": "denoise Graph: ( want :ARG1 go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": " ( "
},
"reconfigure_mask-all-mass": {
"src": "denoise Graph: ( want :ARG1 <extra_id_0> go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )"
},
"reconfigure_mask-all-mass-unshuffle": {
"src": "denoise Graph: ( want :ARG1 <extra_id_0> go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"reconfigure_mask-components": {
"src": "denoise Graph: ( want :ARG1 <extra_id_0> go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "<extra_id_0> ( <extra_id_1>"
},
"reconfigure_mask-components-corrupt": {
"src": "denoise Graph: ( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )"
},
"reconfigure_mask-components-corrupt-unshuffle": {
"src": "denoise Graph: ( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "( want :ARG0 ( boy ) :ARG1 ( go :ARG0 boy ) )"
},
"reconfigure_mask-components-drop": {
"src": "denoise Graph: ( want :ARG1 go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": " ( "
},
"reconfigure_mask-components-mass": {
"src": "denoise Graph: ( want :ARG1 <extra_id_0> go :ARG0 boy ) :ARG0 ( boy ) )",
"tgt": "( want :ARG1 ( go :ARG0 boy ) :ARG0 ( boy ) )"
},
"reconfigure_mask-components-mass-unshuffle": {
"src": "denoise | |
address1 is not FAILURE:
elements0.append(address1)
address6 = FAILURE
address6 = self._read__()
if address6 is not FAILURE:
elements0.append(address6)
address7 = FAILURE
address7 = self._read_statement()
if address7 is not FAILURE:
elements0.append(address7)
else:
elements0 = None
self._offset = index1
else:
elements0 = None
self._offset = index1
else:
elements0 = None
self._offset = index1
if elements0 is None:
address0 = FAILURE
else:
address0 = TreeNode71(self._input[index1:self._offset], index1, elements0)
self._offset = self._offset
self._cache['block_body'][index0] = (address0, self._offset)
return address0
    def _read_loop_name(self):
        # Parse the `loop_name` rule: '@' _ basic_name.
        # Returns a parse-tree node on success or FAILURE; result and final
        # offset are memoized in self._cache keyed by the start offset.
        address0, index0 = FAILURE, self._offset
        cached = self._cache['loop_name'].get(index0)
        if cached:
            # Packrat cache hit: restore the recorded end offset and result.
            self._offset = cached[1]
            return cached[0]
        index1, elements0 = self._offset, []
        address1 = FAILURE
        # Match the literal '@' (one character).
        chunk0, max0 = None, self._offset + 1
        if max0 <= self._input_size:
            chunk0 = self._input[self._offset:max0]
        if chunk0 == '@':
            address1 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
            self._offset = self._offset + 1
        else:
            address1 = FAILURE
            # Record the furthest failure position and what was expected there.
            if self._offset > self._failure:
                self._failure = self._offset
                self._expected = []
            if self._offset == self._failure:
                self._expected.append('\'@\'')
        if address1 is not FAILURE:
            address2 = FAILURE
            address2 = self._read__()
            if address2 is not FAILURE:
                elements0.append(address2)
                address3 = FAILURE
                address3 = self._read_basic_name()
                if address3 is not FAILURE:
                    elements0.append(address3)
                else:
                    # basic_name failed: abandon the sequence and rewind.
                    elements0 = None
                    self._offset = index1
            else:
                elements0 = None
                self._offset = index1
        else:
            elements0 = None
            self._offset = index1
        if elements0 is None:
            address0 = FAILURE
        else:
            # Sequence matched: delegate node construction to the actions object.
            address0 = self._actions.loop_name(self._input, index1, self._offset, elements0)
            self._offset = self._offset
        self._cache['loop_name'][index0] = (address0, self._offset)
        return address0
    def _read_loop_body(self):
        # Parse the `loop_body` rule: 'do' _ws stmt_or_expr.
        # Only the statement/expression node is kept in elements0; result and
        # final offset are memoized in self._cache keyed by the start offset.
        address0, index0 = FAILURE, self._offset
        cached = self._cache['loop_body'].get(index0)
        if cached:
            # Packrat cache hit: restore the recorded end offset and result.
            self._offset = cached[1]
            return cached[0]
        index1, elements0 = self._offset, []
        address1 = FAILURE
        # Match the literal 'do' (two characters).
        chunk0, max0 = None, self._offset + 2
        if max0 <= self._input_size:
            chunk0 = self._input[self._offset:max0]
        if chunk0 == 'do':
            address1 = TreeNode(self._input[self._offset:self._offset + 2], self._offset, [])
            self._offset = self._offset + 2
        else:
            address1 = FAILURE
            # Record the furthest failure position and what was expected there.
            if self._offset > self._failure:
                self._failure = self._offset
                self._expected = []
            if self._offset == self._failure:
                self._expected.append('\'do\'')
        if address1 is not FAILURE:
            address2 = FAILURE
            address2 = self._read__ws()
            if address2 is not FAILURE:
                address3 = FAILURE
                address3 = self._read_stmt_or_expr()
                if address3 is not FAILURE:
                    elements0.append(address3)
                else:
                    # Body failed: abandon the sequence and rewind.
                    elements0 = None
                    self._offset = index1
            else:
                elements0 = None
                self._offset = index1
        else:
            elements0 = None
            self._offset = index1
        if elements0 is None:
            address0 = FAILURE
        else:
            # Sequence matched: delegate node construction to the actions object.
            address0 = self._actions.loop_body(self._input, index1, self._offset, elements0)
            self._offset = self._offset
        self._cache['loop_body'][index0] = (address0, self._offset)
        return address0
    def _read_but_if_stmt(self):
        # Parse the `but_if_stmt` rule, an ordered choice:
        #   expression _ &'but'   (expression followed by a lookahead for 'but')
        # / stmt_or_expr
        # Results are memoized in self._cache keyed by the start offset.
        address0, index0 = FAILURE, self._offset
        cached = self._cache['but_if_stmt'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        index2, elements0 = self._offset, []
        address1 = FAILURE
        address1 = self._read_expression()
        if address1 is not FAILURE:
            elements0.append(address1)
            address2 = FAILURE
            address2 = self._read__()
            if address2 is not FAILURE:
                elements0.append(address2)
                address3 = FAILURE
                # Positive lookahead: peek for 'but' without consuming it.
                index3 = self._offset
                chunk0, max0 = None, self._offset + 3
                if max0 <= self._input_size:
                    chunk0 = self._input[self._offset:max0]
                if chunk0 == 'but':
                    address3 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
                    self._offset = self._offset + 3
                else:
                    address3 = FAILURE
                    if self._offset > self._failure:
                        self._failure = self._offset
                        self._expected = []
                    if self._offset == self._failure:
                        self._expected.append('\'but\'')
                self._offset = index3
                if address3 is not FAILURE:
                    # Lookahead succeeded: substitute an empty node (consumes nothing).
                    address3 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
                    self._offset = self._offset
                else:
                    address3 = FAILURE
                if address3 is not FAILURE:
                    elements0.append(address3)
                else:
                    elements0 = None
                    self._offset = index2
            else:
                elements0 = None
                self._offset = index2
        else:
            elements0 = None
            self._offset = index2
        if elements0 is None:
            address0 = FAILURE
        else:
            address0 = TreeNode75(self._input[index2:self._offset], index2, elements0)
            self._offset = self._offset
        if address0 is FAILURE:
            # First alternative failed: rewind and try a bare stmt_or_expr.
            self._offset = index1
            address0 = self._read_stmt_or_expr()
            if address0 is FAILURE:
                self._offset = index1
        self._cache['but_if_stmt'][index0] = (address0, self._offset)
        return address0
    def _read_but_if(self):
        # Parse the `but_if` rule:
        #   'but' !'always' _ 'if' _ (expression / '*') _ 'occurs' _ then
        # Results are memoized in self._cache keyed by the start offset.
        address0, index0 = FAILURE, self._offset
        cached = self._cache['but_if'].get(index0)
        if cached:
            self._offset = cached[1]
            return cached[0]
        index1, elements0 = self._offset, []
        address1 = FAILURE
        # Match the literal 'but'.
        chunk0, max0 = None, self._offset + 3
        if max0 <= self._input_size:
            chunk0 = self._input[self._offset:max0]
        if chunk0 == 'but':
            address1 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
            self._offset = self._offset + 3
        else:
            address1 = FAILURE
            if self._offset > self._failure:
                self._failure = self._offset
                self._expected = []
            if self._offset == self._failure:
                self._expected.append('\'but\'')
        if address1 is not FAILURE:
            address2 = FAILURE
            # Negative lookahead: succeed only if 'always' does NOT follow.
            index2 = self._offset
            chunk1, max1 = None, self._offset + 6
            if max1 <= self._input_size:
                chunk1 = self._input[self._offset:max1]
            if chunk1 == 'always':
                address2 = TreeNode(self._input[self._offset:self._offset + 6], self._offset, [])
                self._offset = self._offset + 6
            else:
                address2 = FAILURE
                if self._offset > self._failure:
                    self._failure = self._offset
                    self._expected = []
                if self._offset == self._failure:
                    self._expected.append('\'always\'')
            self._offset = index2
            if address2 is FAILURE:
                # 'always' absent: lookahead succeeds with an empty node.
                address2 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
                self._offset = self._offset
            else:
                address2 = FAILURE
            if address2 is not FAILURE:
                elements0.append(address2)
                address3 = FAILURE
                address3 = self._read__()
                if address3 is not FAILURE:
                    elements0.append(address3)
                    address4 = FAILURE
                    # Match the literal 'if'.
                    chunk2, max2 = None, self._offset + 2
                    if max2 <= self._input_size:
                        chunk2 = self._input[self._offset:max2]
                    if chunk2 == 'if':
                        address4 = TreeNode(self._input[self._offset:self._offset + 2], self._offset, [])
                        self._offset = self._offset + 2
                    else:
                        address4 = FAILURE
                        if self._offset > self._failure:
                            self._failure = self._offset
                            self._expected = []
                        if self._offset == self._failure:
                            self._expected.append('\'if\'')
                    if address4 is not FAILURE:
                        address5 = FAILURE
                        address5 = self._read__()
                        if address5 is not FAILURE:
                            elements0.append(address5)
                            address6 = FAILURE
                            # Ordered choice: expression / '*' (wildcard).
                            index3 = self._offset
                            address6 = self._read_expression()
                            if address6 is FAILURE:
                                self._offset = index3
                                chunk3, max3 = None, self._offset + 1
                                if max3 <= self._input_size:
                                    chunk3 = self._input[self._offset:max3]
                                if chunk3 == '*':
                                    address6 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
                                    self._offset = self._offset + 1
                                else:
                                    address6 = FAILURE
                                    if self._offset > self._failure:
                                        self._failure = self._offset
                                        self._expected = []
                                    if self._offset == self._failure:
                                        self._expected.append('\'*\'')
                                if address6 is FAILURE:
                                    self._offset = index3
                            if address6 is not FAILURE:
                                elements0.append(address6)
                                address7 = FAILURE
                                address7 = self._read__()
                                if address7 is not FAILURE:
                                    elements0.append(address7)
                                    address8 = FAILURE
                                    # Match the literal 'occurs'.
                                    chunk4, max4 = None, self._offset + 6
                                    if max4 <= self._input_size:
                                        chunk4 = self._input[self._offset:max4]
                                    if chunk4 == 'occurs':
                                        address8 = TreeNode(self._input[self._offset:self._offset + 6], self._offset, [])
                                        self._offset = self._offset + 6
                                    else:
                                        address8 = FAILURE
                                        if self._offset > self._failure:
                                            self._failure = self._offset
                                            self._expected = []
                                        if self._offset == self._failure:
                                            self._expected.append('\'occurs\'')
                                    if address8 is not FAILURE:
                                        address9 = FAILURE
                                        address9 = self._read__()
                                        if address9 is not FAILURE:
                                            elements0.append(address9)
                                            address10 = FAILURE
                                            address10 = self._read_then()
                                            if address10 is not FAILURE:
                                                elements0.append(address10)
                                            else:
                                                elements0 = None
                                                self._offset = index1
                                        else:
                                            elements0 = None
                                            self._offset = index1
                                    else:
                                        elements0 = None
                                        self._offset = index1
                                else:
                                    elements0 = None
                                    self._offset = index1
                            else:
                                elements0 = None
                                self._offset = index1
                        else:
                            elements0 = None
                            self._offset = index1
                    else:
                        elements0 = None
                        self._offset = index1
                else:
                    elements0 = None
                    self._offset = index1
            else:
                elements0 = None
                self._offset = index1
        else:
            elements0 = None
            self._offset = index1
        if elements0 is None:
            address0 = FAILURE
        else:
            # Sequence matched: delegate node construction to the actions object.
            address0 = self._actions.but_if(self._input, index1, self._offset, elements0)
            self._offset = self._offset
        self._cache['but_if'][index0] = (address0, self._offset)
        return address0
def _read_always(self):
address0, index0 = FAILURE, self._offset
cached = self._cache['always'].get(index0)
if cached:
self._offset = cached[1]
return cached[0]
index1, elements0 = self._offset, []
address1 = FAILURE
chunk0, max0 = None, self._offset + 3
if max0 <= self._input_size:
chunk0 = self._input[self._offset:max0]
if chunk0 == 'but':
address1 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address1 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'but\'')
if address1 is not FAILURE:
address2 = FAILURE
address2 = self._read__()
if address2 is not FAILURE:
elements0.append(address2)
address3 = FAILURE
chunk1, max1 = None, self._offset + 6
if max1 <= self._input_size:
chunk1 = self._input[self._offset:max1]
if chunk1 == 'always':
address3 = TreeNode(self._input[self._offset:self._offset + 6], self._offset, [])
self._offset = self._offset + 6
else:
address3 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'always\'')
if address3 is not FAILURE:
address4 = FAILURE
address4 = self._read__ws()
if address4 is not FAILURE:
elements0.append(address4)
address5 = FAILURE
address5 = self._read_stmt_or_expr()
if address5 is not FAILURE:
elements0.append(address5)
else:
elements0 = None
self._offset = index1
else:
elements0 = None
self._offset = index1
else:
elements0 = None
self._offset = index1
else:
elements0 = None
self._offset = index1
else:
elements0 = None
self._offset = index1
if elements0 is None:
address0 = FAILURE
else:
address0 = self._actions.always(self._input, index1, self._offset, elements0)
| |
% arg.type
call.addTemp(temp_arr_type, temp)
call.addTemp("int", "i")
# Generate the call surrounded by temp array allocation, copies, writebacks, and temp free
count = "*%s" % arg.countParam().name
call.addCopy("%s = (%s)malloc(sizeof(%s) * %s);" %
(temp, temp_arr_type, arg.type, count))
# temp free. This is not included in the !MPI_UNDEFINED block
call.addFree(temp)
# generate a copy and a writeback statement for this type of handle
if arg.isStatus():
call.addActualMPI2("((%s == MPI_F_STATUSES_IGNORE) ? MPI_STATUSES_IGNORE : %s)" % (arg.name, temp))
call.addActualMPICH_C2F(temp)
# Status arrays are OUT args. No convertioning on input.
call.addWritebackMPI2("if (%s != MPI_F_STATUSES_IGNORE)" % (arg.name))
call.addWriteback(" for (i=0; i < %s; i++)" % count)
call.addWritebackMPI2(" %s_c2f(&%s[i], &%s[i * MPI_F_STATUS_SIZE]);" % (conv, temp, arg.name))
call.addWritebackMPICH_C2F(" %s_c2f(&%s[i], &%s[i * MPI_F_STATUS_SIZE]);" % (conv, temp, arg.name))
else:
call.addActualC2F(temp)
#if arg.isInout(decl.name):
call.addCopy("for (i=0; i < %s; i++)" % count)
call.addCopy(" temp_%s[i] = %s_f2c(%s[i]);" % (arg.name, conv, arg.name))
if arg.pointers:
call.addWriteback("for (i=0; i < %s; i++)" % count)
call.addWriteback(" %s[i] = %s_c2f(temp_%s[i]);" % (arg.name, conv, arg.name))
if decl.hasArrayIndexOutputParam():
call.addWriteback("}")
call.write(out)
if decl.returnsErrorCode():
out.write(" *ierr = %s;\n" % return_val)
else:
out.write(" return %s;\n" % return_val)
out.write("}\n\n")
# Write out various bindings that delegate to the main fortran wrapper
write_fortran_binding(out, decl, delegate_name, decl.name.upper())
write_fortran_binding(out, decl, delegate_name, decl.name.lower())
write_fortran_binding(out, decl, delegate_name, decl.name.lower() + "_")
write_fortran_binding(out, decl, delegate_name, decl.name.lower() + "__")
################################################################################
# Macros:
# - functions annotated as @macro or @bodymacro define the global macros and
# basic pieces of the generator.
# - include_decl is used to include MPI declarations into function scopes.
################################################################################
# Table of global macros, keyed by macro name (populated by @macro below).
macros = {}
# This decorator adds macro functions to the outermost function scope.
def macro(macro_name, **attrs):
    """Decorator factory that registers a function as a generator macro.

    The decorated function is stored in the global ``macros`` table under
    ``macro_name``.  ``has_body`` defaults to False; any keyword attributes
    passed here are then set on the function, so they can override it.
    """
    def decorate(fun):
        # Register the macro in the outermost scope under the supplied name.
        macros[macro_name] = fun
        # Macros have no body unless an attribute below says otherwise.
        fun.has_body = False
        for key, value in attrs.items():
            setattr(fun, key, value)
        return fun
    return decorate
def handle_list(list_name, list, args):
    """This function handles indexing lists used as macros in the wrapper generator.
    There are two syntaxes:
      {{<list_name>}}          Evaluates to the whole list, e.g. 'foo, bar, baz'
      {{<list_name> <index>}}  Evaluates to a particular element of a list.

    :param list_name: name the macro was invoked with (used in error messages).
    :param list: the list being indexed (parameter name kept for compatibility).
    :param args: macro arguments; falsy for the whole list, one index string otherwise.
    """
    if not args:
        return list
    len(args) == 1 or syntax_error("Wrong number of args for list expression.")
    try:
        return list[int(args[0])]
    except ValueError:
        syntax_error("Invalid index value: '%s'" % args[0])
    except IndexError:
        # Bug fix: the original referenced an undefined name `index` here,
        # raising NameError instead of reporting the out-of-range index.
        # int(args[0]) is safe in this handler (ValueError was not raised).
        syntax_error("Index out of range in '%s': %d" % (list_name, int(args[0])))
class TypeApplier:
    """Macro callable that applies a named macro to every argument of a
    declaration whose C type matches a given type string.

    Invoking the instance writes ``<macro_name>(<arg_name>);`` to *out*
    for each matching argument, in declaration order.
    """
    def __init__(self, decl):
        # decl: the parsed function declaration whose args will be scanned.
        self.decl = decl

    def __call__(self, out, scope, args, children):
        len(args) == 2 or syntax_error("Wrong number of args in apply macro.")
        wanted_type, macro_name = args
        for arg in self.decl.args:
            if arg.cType() != wanted_type:
                continue
            out.write("%s(%s);\n" % (macro_name, arg.name))
class TypeArtTypeApplier:
    """Macro callable that emits a TypeART check call for the buffer, count
    and datatype arguments of an MPI declaration.

    ``__call__`` classifies the declaration's arguments by name/type
    heuristics and writes ``<macro_name>("<fn>", ta_ret_adr, ...);`` for
    'send', 'recv' or 'sendrecv' operation kinds.
    """
    def __init__(self, decl):
        # decl: the parsed MPI function declaration to inspect.
        self.decl = decl
    def _find_count_args(self):
        # Collect (arg, is_array) pairs for args whose name contains "count":
        # scalar ints as (arg, False); int arrays/pointers as (arg, True).
        count_args = []
        count_matcher = re.compile('.*count.*')
        for arg in self.decl.args:
            if count_matcher.match(arg.name) is not None:
                if arg.cType() == 'int':
                    count_args.append((arg, False))
                elif arg.cType() == 'const int[]' or arg.cType() == 'const int*':
                    count_args.append((arg, True))
                else:
                    print("Unknown count argument: ", arg.cType())
        return count_args
    def _find_buffer_args(self):
        # Collect (arg, is_const) pairs for void* args whose name contains
        # "buf" or equals "origin_addr".
        buffer_args = []
        buffer_matcher = re.compile('(.*buf.*)|(origin_addr)')
        for arg in self.decl.args:
            if buffer_matcher.match(arg.name) is not None:
                if arg.cType() == 'const void*':
                    buffer_args.append((arg, True))
                elif arg.cType() == 'void*':
                    buffer_args.append((arg, False))
        return buffer_args
    def _find_type_args(self):
        # Collect MPI_Datatype arguments whose name contains "type".
        type_args = []
        type_matcher = re.compile('.*type.*')
        for arg in self.decl.args:
            if arg.cType() == 'MPI_Datatype' and type_matcher.match(arg.name) is not None:
                type_args.append(arg)
        return type_args
    def __call__(self, out, scope, args, children):
        len(args) == 2 or syntax_error("Wrong number of args in apply macro.")
        #type_s, macro_name = args
        #types = type_s.split(',')
        op_type, macro_name = args
        # First actual of the emitted call is the return-address slot.
        callargs = ['ta_ret_adr']
        buffer_args = self._find_buffer_args()
        count_args = self._find_count_args()
        type_args = self._find_type_args()
        if op_type == 'send':
            if len(buffer_args) != 1:
                raise RuntimeError('%s: Number of buffer args != 1: %d' % (self.decl.name, len(buffer_args)))
            if len(count_args) < 1:
                raise RuntimeError('%s: Number of count args < 1: %d' % (self.decl.name, len(count_args)))
            if len(type_args) < 1:
                raise RuntimeError('%s: Number of type args < 1: %d' % (self.decl.name, len(type_args)))
            callargs += [buffer_args[0][0].name, count_args[0][0].name, type_args[0].name]
        elif op_type == 'recv':
            if len(buffer_args) != 1:
                raise RuntimeError('%s: Number of buffer args != 1: %d' % (self.decl.name, len(buffer_args)))
            if len(count_args) < 1:
                raise RuntimeError('%s: Number of count args < 1: %d' % (self.decl.name, len(count_args)))
            if len(type_args) < 1:
                raise RuntimeError('%s: Number of type args < 1: %d' % (self.decl.name, len(type_args)))
            callargs += [buffer_args[0][0].name, count_args[0][0].name, type_args[0].name]
        elif op_type == 'sendrecv':
            # NOTE(review): the message below says "!= 1" but the check is != 2.
            if len(buffer_args) != 2:
                raise RuntimeError('%s: Number of buffer args != 1: %d' % (self.decl.name, len(buffer_args)))
            if len(count_args) < 1:
                raise RuntimeError('%s: Number of count args < 1: %d' % (self.decl.name, len(count_args)))
            if len(type_args) < 1:
                raise RuntimeError('%s: Number of type args < 1: %d' % (self.decl.name, len(type_args)))
            # Reuse the single count/type for both directions when only one exists.
            if len(count_args) == 1:
                count_args.append(count_args[0])
            if len(type_args) == 1:
                type_args.append(type_args[0])
            # TODO: Figure out how to properly handle count vectors
            send_count = count_args[0][0].name
            if count_args[0][1]:
                send_count = '1'
            recv_count = count_args[1][0].name
            if count_args[1][1]:
                recv_count = '1'
            callargs += [buffer_args[0][0].name, send_count, type_args[0].name, buffer_args[1][0].name, recv_count, type_args[1].name]
        else:
            # Unknown operation type: emit nothing.
            return
        arg_string = callargs[0]
        for callarg in callargs[1:]:
            arg_string += ", " + callarg
        out.write("%s(\"%s\", %s);\n" % (macro_name, self.decl.name, arg_string))
    '''
    print(' ', macro_name, ': ', self.decl.name)
    pos_buf = 0
    for ty in types:
        pos = 0
        for arg in self.decl.args:
            print(arg)
            pos += 1
            if arg.cType() == ty:
                argc += 1
                if callargs == "" and not (arg.name == "count" or arg.name == "sendcount" or arg.name == "recvcount"):
                    pos_buf = pos
                    callargs=arg.name
                elif (not callargs == "") and pos >= pos_buf and (arg.name == "count" or arg.name == "sendcount" or arg.name == "recvcount"):
                    callargs+=", " + arg.name
                break
            if argc == len(type):
                break
        if argc == len(type):
            out.write("%s(\"%s\", %s, %s);\n" % (macro_name, self.decl.name, 'ta_ret_adr', callargs))
    '''
def include_decl(scope, decl):
    """This function is used by macros to include attributes MPI declarations in their scope."""
    # Current-style names.
    scope["ret_type"] = decl.retType()
    scope["args"] = decl.argNames()
    scope["nargs"] = len(decl.argNames())
    scope["types"] = decl.types()
    scope["formals"] = decl.formals()
    scope["apply_to_type"] = TypeApplier(decl)
    scope["apply_typeart_check"] = TypeArtTypeApplier(decl)
    scope.function_name = decl.name

    # Old-style, deprecated names kept for backward compatibility.
    def get_arg(out, scope, args, children):
        # {{get_arg}} -> all arg names; {{get_arg i}} -> the i-th arg name.
        return handle_list("args", decl.argNames(), args)
    scope["get_arg"] = get_arg
    scope["applyToType"] = scope["apply_to_type"]
    scope["retType"] = scope["ret_type"]
    scope["argList"] = "(%s)" % ", ".join(scope["args"])
    scope["argTypeList"] = "(%s)" % ", ".join(scope["formals"])
def all_but(fn_list):
    """Return a sorted list of all MPI function names except those in fn_list.

    :param fn_list: iterable of function names to exclude.
    """
    # sorted() already returns a list; the original wrapped it in a
    # redundant list comprehension.
    return sorted(set(mpi_functions.keys()) - set(fn_list))
@macro("foreachfn", has_body=True)
def foreachfn(out, scope, args, children):
    """Iterate over all functions listed in args.

    args[0] names the loop variable; the remaining args are MPI function
    names.  For each one, the macro body is evaluated in a child scope with
    the declaration's attributes included.
    """
    if not args:
        syntax_error("Error: foreachfn requires function name argument.")
    global cur_function
    fn_var = args[0]
    for fn_name in args[1:]:
        cur_function = fn_name
        if fn_name not in mpi_functions:
            syntax_error(fn_name + " is not an MPI function")
        # Child scope: bind the loop variable and the declaration attributes.
        fn_scope = Scope(scope)
        fn_scope[fn_var] = fn_name
        include_decl(fn_scope, mpi_functions[fn_name])
        for child in children:
            child.evaluate(out, fn_scope)
    cur_function = None
@macro("fn", has_body=True)
def fn(out, scope, args, children):
"""Iterate over listed functions and generate skeleton too."""
args or syntax_error("Error: fn requires function name argument.")
global cur_function
fn_var = args[0]
for fn_name in args[1:]:
optional = fn_name.endswith('?')
if optional:
fn_name = fn_name[:-1]
cur_function = fn_name
if not fn_name in mpi_functions:
if optional:
continue
syntax_error(fn_name + " is not an MPI function")
fn = mpi_functions[fn_name]
return_val = "_wrap_py_return_val"
fn_scope = Scope(scope)
fn_scope[fn_var] = fn_name
include_decl(fn_scope, fn)
fn_scope["ret_val"] = return_val
fn_scope["returnVal"] = fn_scope["ret_val"] # deprecated name.
if static_dir:
out = static_out(fn_name)
if ignore_deprecated:
c_call = "%s\n%s = P%s(%s);\n%s" % ("WRAP_MPI_CALL_PREFIX", return_val, fn.name, ", ".join(fn.argNames()), "WRAP_MPI_CALL_POSTFIX")
else:
c_call = "%s = P%s(%s);" % (return_val, fn.name, ", ".join(fn.argNames()))
if fn_name == "MPI_Init" and output_fortran_wrappers:
def callfn(out, scope, args, children):
# All this is to deal with fortran, since fortran's MPI_Init() function is different
# from C's. We need to make sure to delegate specifically to the fortran init wrapping.
# For dynamic libs, we use weak symbols to pick it automatically. For static libs, need
# to rely on input from the user via pmpi_init_binding and the -i option.
out.write(" if (fortran_init) {\n")
if static_dir:
out.write(" if | |
come into play?
@staticmethod
def select_action(node, temperature):
"""
Select action according to the visit count distribution and the temperature.
The temperature is changed dynamically with the visit_softmax_temperature function in the config.
:param node: We will get all the visit counts for all of this node's child nodes so we can calculate the visit_count_distribution to select the action.
:param temperature: Some value used in the voodoo we use to determine the visit_count_distribution
:return: action
"""
# Get visit counts for all of the input node's child nodes.
visit_counts = numpy.array([child.visit_count for child in node.children.values()], dtype="int32")
# Make a list of actions which have resulted in a node. Is this a list of all possible actions?
actions = [action for action in node.children.keys()] # node.children is a dictionary, where the child nodes are the values and each child node's key is the action taken to get that child node.
# If temperature == 0, select the action of the node with the most visits, if temperature == infinity, select the action randomly. If the temperature is somewhere in between, use some voodoo with the temperature and visit counts to select the action.
if temperature == 0:
action = actions[numpy.argmax(visit_counts)]
elif temperature == float("inf"):
action = numpy.random.choice(actions)
else:
# Some voodoo to get the distribution of the visit count of each action. See paper appendix Data Generation
visit_count_distribution = visit_counts ** (1 / temperature)
visit_count_distribution = visit_count_distribution / sum(visit_count_distribution)
action = numpy.random.choice(actions, p=visit_count_distribution) # The probability for each action is determined by the visit_count_distribution.
return action
# This MCTS is game independent, so you can use it to play any game.
class MCTS:
"""
Core Monte Carlo Tree Search algorithm.
To decide on an action, we run N simulations, always starting at the root of the search tree and traversing the tree according to the UCB formula until we reach a
leaf node. (UCB = Upper-Confidence-Bound, which is some voodoo used in MCTS, see https://en.wikipedia.org/wiki/Monte_Carlo_tree_search#Monte_Carlo_Method)
"""
 # Not much happens at construction time; the real work is in run().
 def __init__(self, config):
  """Keep a reference to the search configuration (num_simulations, UCB constants, ...)."""
  self.config = config
 # Runs the full search. to_play is the current player; override_root_with
 # lets the caller reuse an already-expanded node as the root.
 def run(self, model, observation, legal_actions, to_play, add_exploration_noise, override_root_with=None):
  """
  At the root of the search tree we use the representation function to obtain a
  hidden state of the current observation. We then run a Monte Carlo Tree Search
  using only action sequences and the model learned by the network.

  Returns (root, extra_info): the root carries accumulated visit counts on its
  children -- presumably the caller uses those to choose the actual move.
  """
  if override_root_with:
   root = override_root_with
   root_predicted_value = None
  else:
   # Set up and expand the root node from the real observation.
   root = Node(0)
   observation = (
    torch.tensor(observation)
    .float()
    .unsqueeze(0)
    .to(next(model.parameters()).device)
   )
   root_predicted_value, reward, policy_logits, hidden_state = model.initial_inference(observation) # representation function encodes the initial observation
   # The network heads emit categorical "support" distributions; convert back to scalars.
   root_predicted_value = models.support_to_scalar(root_predicted_value, self.config.support_size).item()
   reward = models.support_to_scalar(reward, self.config.support_size).item()
   assert legal_actions, f"Legal actions should not be an empty array. Got {legal_actions}."
   assert set(legal_actions).issubset(set(self.config.action_space)), "Legal actions should be a subset of the action space."
   root.expand(legal_actions, to_play, reward, policy_logits, hidden_state)
  if add_exploration_noise:
   root.add_exploration_noise(dirichlet_alpha=self.config.root_dirichlet_alpha, exploration_fraction=self.config.root_exploration_fraction)
  min_max_stats = MinMaxStats() # running min/max of values seen in the tree, used to normalize UCB scores
  max_tree_depth = 0
  # Every simulation grows the SAME tree: each iteration starts at the root,
  # descends to an unexpanded leaf, expands it, and backpropagates the value.
  for _ in range(self.config.num_simulations):
   virtual_to_play = to_play
   node = root
   search_path = [node] # nodes visited on this descent, root -> leaf
   current_tree_depth = 0
   # Walk down already-expanded nodes, always following the best-UCB child,
   # until we reach a leaf that has not been expanded yet.
   while node.expanded():
    current_tree_depth += 1
    action, node = self.select_child(node, min_max_stats) # pick the highest-UCB child
    search_path.append(node)
    # Alternate the (virtual) player turn; as written this only supports
    # one- or two-player alternation ("more than two player mode not implemented").
    if virtual_to_play + 1 < len(self.config.players):
     virtual_to_play = self.config.players[virtual_to_play + 1]
    else:
     virtual_to_play = self.config.players[0]
   # Inside the search tree we use the dynamics function to obtain the next
   # hidden state given an action and the previous hidden state.
   parent = search_path[-2]
   value, reward, policy_logits, hidden_state = model.recurrent_inference(parent.hidden_state, torch.tensor([[action]]).to(parent.hidden_state.device))
   value = models.support_to_scalar(value, self.config.support_size).item()
   reward = models.support_to_scalar(reward, self.config.support_size).item()
   node.expand(self.config.action_space, virtual_to_play, reward, policy_logits, hidden_state) # expand the reached leaf
   self.backpropagate(search_path, value, virtual_to_play, min_max_stats) # push the value estimate back up the visited path
   max_tree_depth = max(max_tree_depth, current_tree_depth) # deepest descent seen so far
  extra_info = {
   "max_tree_depth": max_tree_depth,
   "root_predicted_value": root_predicted_value
  }
  return root, extra_info
def select_child(self, node, min_max_stats):
"""
Select the child with the highest UCB score, aka the best action.
"""
max_ucb = max(self.ucb_score(node, child, min_max_stats) for action, child in node.children.items()) # Find the action with the highest ucb score of the node's children.
# Select an action randomly from a list of children with the highest ucb score. So wait, wouldn't that list only have one item? Unless there are two children with the max score.
action = numpy.random.choice([action for action, child in node.children.items() if self.ucb_score(node, child, min_max_stats) == max_ucb])
return action, node.children[action]
def ucb_score(self, parent, child, min_max_stats):
"""
Find the ucb score for a node based on its value, plus an exploration bonus based on the prior. I'm not sure what a prior is.
"""
# Basically this is all voodoo, so unfortunately I don't have much to say about it.
# The score seems to mostly be based on the node's visit count, so where do the neural nets come into play?
# OH! The neural nets come into play in backpropagate(), as backpropagate() uses the reward and value from the dynamics function to improve the min_max_stats! Great!
pb_c = (
math.log(
(parent.visit_count + self.config.pb_c_base + 1) / self.config.pb_c_base
)
+ self.config.pb_c_init
)
pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
prior_score = pb_c * child.prior
if child.visit_count > 0:
| |
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
from typing import Union
from HSTB.kluster.surface_helpers import *
class QuadTree:
"""
Adapted from https://github.com/GliderToolsCommunity/GliderTools/blob/master/glidertools/mapping.py
Recursively splits data into quadrants
Object oriented quadtree can access children recursively
Ultimately, we want to:
- save huge datasets in a way that allows for lazy loading by location
- save indices that allow you to update the grid when the soundings change
- allow for constraining ultimate grid sizes to powers of two
- allow for utm and geographic coordinate systems
- implement mean/CUBE algorithms for cell depth
- write locks on quads, file lock? one master lock for now?
"""
 def __init__(self, data: Union[np.ndarray, xr.Dataset], mins=None, maxs=None, max_points_per_quad=5,
              max_grid_size=128, min_grid_size=0.5, location=[], index=[], parent=None):
  """
  Build a quadtree node over *data*, recursively splitting into four child
  quads until each leaf satisfies the point-count / grid-size constraints
  (see _build_split_check).

  NOTE(review): location=[] and index=[] are mutable default arguments; the
  code below always rebinds (location + [n], new lists) rather than mutating
  them, so this appears safe, but None sentinels would be more robust.
  """
  self.parent = parent # parent quad instance, None if this is the top quad
  self.data = QuadData(data)
  if self.parent is None: # first run through make sure the input data to Root is of the right type
   self.data.validate_input_data()
  emptyquad = self.data.is_empty()
  # can't save boolean to json/zarr attribute, need to encode as a diff type, this kind of sucks but leaving it for now
  self.is_leaf = False # is the end of a quad split, contains the data has no children
  self.quad_depths = []     # root collects the mean depth of every leaf (see bottom of this method)
  self.quad_depth = 0       # mean z of this quad's points, set only on non-empty leaves
  self.max_grid_size = max_grid_size
  self.min_grid_size = min_grid_size
  self.max_points_per_quad = max_points_per_quad
  if mins is None and maxs is None:
   if not emptyquad:
    # top-level quad: derive extents from the data, then snap them to the alignment grid
    xval = self.data.getvalues('x')
    yval = self.data.getvalues('y')
    self.mins = [np.min(xval), np.min(yval)]
    self.maxs = [np.max(xval), np.max(yval)]
    self._align_toplevel_grid()
   else: # get here when you intialize empty quad to then load()
    self.mins = [0, 0]
    self.maxs = [0, 0]
  else:
   self.mins = mins
   self.maxs = maxs
  if not index:
   if not emptyquad:
    self.index = np.arange(self.data()['x'].shape[0]).tolist()
   else: # get here when you intialize empty quad to then load()
    self.index = []
  else:
   self.index = index
  self.sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]
  # NOTE(review): called even for empty quads -- assumes self.data() still
  # returns an 'x' entry with a zero-length shape; confirm QuadData contract.
  self.n_points = self.data()['x'].shape[0]
  self.location = location   # path of child indices (0-3) from the root to this quad
  self.tree_depth = len(location)
  self.children = []
  if not emptyquad:
   index_q0, index_q1, index_q2, index_q3, xmin, xmax, ymin, ymax, xmid, ymid = self._build_quadrant_indices()
   top_left_data = self.data.mask_data(index_q0)
   top_right_data = self.data.mask_data(index_q1)
   bottom_left_data = self.data.mask_data(index_q2)
   bottom_right_data = self.data.mask_data(index_q3)
   should_divide = self._build_split_check(len(top_left_data['x']), len(top_right_data['x']), len(bottom_left_data['x']), len(bottom_right_data['x']))
  else:
   should_divide = False
  if should_divide:
   # recurse: build the four children and hand the data down; this quad
   # becomes an interior node and drops its own data/index
   props = dict(max_points_per_quad=max_points_per_quad, min_grid_size=min_grid_size, max_grid_size=max_grid_size, parent=self)
   self.children.append(QuadTree(top_left_data, [xmin, ymid], [xmid, ymax], index=np.array(self.index)[index_q0].tolist(), location=location + [0], **props))
   self.children.append(QuadTree(top_right_data, [xmid, ymid], [xmax, ymax], index=np.array(self.index)[index_q1].tolist(), location=location + [1], **props))
   self.children.append(QuadTree(bottom_left_data, [xmin, ymin], [xmid, ymid], index=np.array(self.index)[index_q2].tolist(), location=location + [2], **props))
   self.children.append(QuadTree(bottom_right_data, [xmid, ymin], [xmax, ymid], index=np.array(self.index)[index_q3].tolist(), location=location + [3], **props))
   self.index = []
   self.data = None
  else:
   self.is_leaf = True
   if not self.data.is_empty():
    if self.data.check_data_names('z'):
     # leaf depth = mean z of its soundings; registered with the root
     self.quad_depth = float(self.data.getvalues('z').mean())
     self.root.quad_depths.append(self.quad_depth)
def save(self, path, storage=StorePickles):
handle = storage.save(self, path)
storage.clear_children(handle)
for i, child in enumerate(self.children):
child_handle = storage.child_path(handle, storage.child_names[i])
child.save(child_handle, storage=storage)
@classmethod
def load(cls, path, storage=StorePickles):
loaded_quad = cls(np.zeros((1,), dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])) # initialize a new object
handle = storage.load(loaded_quad, path)
if storage.has_children(handle):
for i in storage.child_names:
child = cls.load(storage.child_path(handle, i), storage=storage)
child.parent = loaded_quad
loaded_quad.children.append(child)
return loaded_quad
def __getitem__(self, args, silent=False):
"""
Go through the quadtree and locate the quadtree at the provided index, see self.loc
"""
args = np.array(args, ndmin=1)
if any(args > 3):
raise UserWarning("A quadtree only has 4 possible children, provided locations: {}".format(args))
quadtree = self
passed = []
for depth in args:
if (len(quadtree.children) > 0) | (not silent):
quadtree = quadtree.children[depth]
passed += [depth]
else:
return None
return quadtree
def __repr__(self):
return "<{} : {}>".format(str(self.__class__)[1:-1], str(self.location))
 def __str__(self):
  """Multi-line human-readable summary: location, depth, point counts and bounds."""
  location = str(self.location)[1:-1]
  location = location if location != "" else "[] - base QuadTree has no location"
  # boundaries and spacing to make it pretty
  # NOTE(review): mins holds [xmin, ymin] and maxs holds [xmax, ymax], so
  # `top` is bound to the MINIMUM y and `bot` to the MAXIMUM y -- the printed
  # top/bottom labels look swapped; confirm the intended display orientation
  # before changing, since this only affects the rendered text.
  left, top = self.mins
  right, bot = self.maxs
  wspace = " " * len("{:.2f}".format(top))
  strtspace = " " * (15 - max(0, (len("{:.2f}".format(top)) - 6)))
  # text output (what youll see when you print the object)
  about_tree = "\n".join(
   [
    "",
    "QuadTree object",
    "===============",
    "  location:        {}".format(location),
    "  tree depth:      {}".format(len(self.location)),
    "  n_points:        {}".format(self.n_points),
    "  boundaries:      {:.2f}".format(top),
    "{}{:.2f}{}{:.2f}".format(strtspace, left, wspace, right),
    "                   {:.2f}".format(bot),
    "  children_points: {}".format(str([c.n_points for c in self.children])),
   ]
  )
  return about_tree
def _align_toplevel_grid(self):
"""
So that our grids will line up nicely with each other (as long as they use max_grid_size that are divisable
by a similar number) we adjust the max/min of the top level grid to an even multiple of max_grid_size
"""
# align origin with multiple of max_grid_size
double_max_grid = self.max_grid_size * 4
self.mins[0] -= self.mins[0] % double_max_grid
self.mins[1] -= self.mins[1] % double_max_grid
# extend the grid to make it square and an even multiple of the max grid size
max_range = max(self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1])
maxadjust = max_range % double_max_grid
if maxadjust:
max_range += (double_max_grid - maxadjust)
self.maxs[0] = self.mins[0] + max_range
self.maxs[1] = self.mins[1] + max_range
def _build_quadrant_indices(self):
"""
Determine the data indices that split the data into four quadrants
Returns
-------
np.array
data indices that correspond to points in the top left quadrant
np.array
data indices that correspond to points in the top right quadrant
np.array
data indices that correspond to points in the bottom left quadrant
np.array
data indices that correspond to points in the bottom right quadrant
float
minimum x value of the input points
float
maximum x value of the input points
float
minimum y value of the input points
float
maximum y value of the input points
float
x midpoint value of the input points
float
y midpoint value of the input points
"""
xmin, ymin = self.mins
xmax, ymax = self.maxs
xmid = 0.5 * (xmin + xmax)
ymid = 0.5 * (ymin + ymax)
# split the data into four quadrants
xval = self.data.getvalues('x')
yval = self.data.getvalues('y')
xval_lessthan = xval <= xmid
xval_greaterthan = xval >= xmid
yval_lessthan = yval <= ymid
yval_greaterthan = yval >= ymid
index_q0 = xval_lessthan & yval_greaterthan # top left
index_q1 = xval_greaterthan & yval_greaterthan # top right
index_q2 = xval_lessthan & yval_lessthan # bottom left
index_q3 = xval_greaterthan & yval_lessthan # bottom right
return index_q0, index_q1, index_q2, index_q3, xmin, xmax, ymin, ymax, xmid, ymid
 def _build_split_check(self, q0_size: int, q1_size: int, q2_size: int, q3_size: int):
  """
  Builds a check to determine whether or not this quadrant should be divided. Uses:
  point_check - points in the quad must not exceed the provided maximum allowable points
  max_size_check - quad size must not exceed the provided maximum allowable grid size
  min_size_check - quad size (after splitting) must not end up less than minimum allowable grid size
  too_few_points_check - if you know that splitting will lead to less than allowable max points, dont split
  empty_quad_check - if there are three quadrants that are empty, split so that you don't end up with big
  quads that are mostly empty

  Note: only sizes[0] (the x extent) is tested -- top-level quads are forced
  square by _align_toplevel_grid, so x and y extents match.
  Parameters
  ----------
  q0_size
  size of points that belong to the top left quadrant
  q1_size
  size of points that belong to the top right quadrant
  q2_size
  size of points that belong to the bottom left quadrant
  q3_size
  size of points that belong to the bottom right quadrant
  Returns
  -------
  bool
  if True, split this quad into 4 quadrants
  """
  point_check = self.n_points > self.max_points_per_quad
  max_size_check = self.sizes[0] > self.max_grid_size
  min_size_check = self.sizes[0] / 2 >= self.min_grid_size
  too_few_points_check = True
  empty_quad_check = False
  if self.n_points <= self.max_points_per_quad * 4: # only do these checks if there are just a few points, they are costly
   # a quadrant "passes" if it would keep at least max_points_per_quad points or would be empty
   too_few_points_quads = [q0_size >= self.max_points_per_quad or q0_size == 0,
                           q1_size >= self.max_points_per_quad or q1_size == 0,
                           q2_size >= self.max_points_per_quad or q2_size == 0,
                           q3_size >= self.max_points_per_quad or q3_size == 0]
   too_few_points_check = np.count_nonzero(too_few_points_quads) == 4
   if self.n_points <= self.max_points_per_quad:
    empty_quads = [q0_size == 0, q1_size == 0, q2_size == 0, q3_size == 0]
    empty_quad_check = np.count_nonzero(empty_quads) == 3
    too_few_points_check = True # hotwire this, we always split when there are three empty quadrants and we are greater than min resolution
  # split when (too many points OR too big OR three-empty) AND still above min size AND the split is worthwhile
  if (point_check or max_size_check or empty_quad_check) and min_size_check and too_few_points_check:
   return True
  return False
def _traverse_tree(self):
"""
| |
<gh_stars>1-10
# This Python file uses the following encoding: utf-8
#############################
##
# QNX6FS Partition Parser and Automatic file extraction
## -----------------------------------------------------
#
## Author: <NAME> (<EMAIL>)
# Revision: 0.2d rev2 (release-candidate) / Dec 2019
##
# updates posted @ https://www.forensicfocus.com/Forums/viewtopic/t=16846/
##
####################
#!/usr/bin/python
# -*- coding: utf-8 -*-
import binascii, math, zlib, sys, re, os, errno
from struct import *
from collections import OrderedDict
class QNX6FS:
 """Parser for QNX6 ("Power-Safe") filesystems inside a raw disk image.

 NOTE: this module is Python 2 code (print statements, str-as-bytes I/O).
 """
 PARTITION_MAGIC = {'QNX4':0x002f,'QNX6':0x68191122}  # filesystem magic numbers
 FILE_TYPE = {'DIRECTORY':0x01,'DELETED':0x02,'FILE':0x03}  # inode entry status values
 QNX6_SUPERBLOCK_SIZE = 0x200 #Superblock is fixed (512 bytes)
 QNX6_SUPERBLOCK_AREA = 0x1000 #Area reserved for superblock
 QNX6_BOOTBLOCK_SIZE = 0x2000 #Boot Block Size
 QNX6_DIR_ENTRY_SIZE = 0x20 #Dir block size (32 bytes)
 QNX6_INODE_SIZE = 0x80 #INode block size (128 bytes)
 QNX6_INODE_SIZE_BITS = 0x07 #INode entry size shift
 QNX6_NO_DIRECT_POINTERS = 16 #Max Direct iNodes
 QNX6_PTR_MAX_LEVELS = 5 #Max Indirect iNodes
 QNX6_SHORT_NAME_MAX = 27 #Short Name Max Length
 QNX6_LONG_NAME_MAX = 510 #Long Name Max Length
 def __init__(self, source):
  """Remember the path of the raw disk image to parse."""
  self.TARGET_ = source
 def GetPartitions(self):
  """Read the image's first sector and return a dict of partition dicts.

  Handles both an MBR-partitioned disk and a bare (unpartitioned) QNX6
  volume; the latter is detected by an x86 JMP opcode (0xEB = 235) at
  offset 0 in place of boot code.
  """
  with open(self.TARGET_, "rb") as handle:
   DataBlock = handle.read(512);
   ##Split DataBlock into parts
   BootCode = DataBlock[0:446]
   MasterPartitionTable = DataBlock[446:510]
   BootRecordSignature = DataBlock[510:512]
   ##Detect if MBR is valid (0xAA55 signature at offset 510).
   BootRecordSignature = unpack('H', BootRecordSignature)[0]
   if BootRecordSignature == 0xAA55:
    if ord(BootCode[0]) == 235:
     return self.parseNoPartitionQNX(handle,0)
    else:
     print "[-] BootRecordSignature Detected."
     return self.parsePartitionMBR(handle,0)
   else:
    raise IOError('[ERROR] BootRecordSignature Missing; Invalid Disk Image')
    # NOTE(review): the two statements below are unreachable after the
    # raise, and `null` is not defined in Python -- dead code.
    exit()
  return null
 def parsePartitionMBR(self, fileIO, offset, blockSize=512):
  """Parse an MBR/EBR partition-table sector into a dict of partition dicts.

  Marks entries whose type ID is a QNX6 type (0xB1-0xB4) with ['qnx6']=True
  and recurses into extended partitions (types 0x05/0x0F).
  """
  fileIO.seek(offset,0); #absolute from start of file
  DataBlock = fileIO.read(blockSize)
  PartitionTable = DataBlock[446:510]
  PartitionList={}
  for i in range(0,4):
   Offset= 0 + (i * 16);  # each MBR partition entry is 16 bytes
   PartitionList[i]={}
   PartitionList[i]['BootIndicator'] = PartitionTable[Offset+0]
   PartitionList[i]['StartingCHS'] = PartitionTable[Offset+1:Offset+4]
   PartitionList[i]['PartitionType'] = PartitionTable[Offset+4]
   # NOTE(review): the slice end should probably be Offset+8 -- as written,
   # EndingCHS is an empty string for every entry after the first.
   PartitionList[i]['EndingCHS'] = PartitionTable[Offset+5:8]
   PartitionList[i]['StartingSector'] = unpack('<I',PartitionTable[Offset+8:Offset+12])[0]
   PartitionList[i]['PartitionSize'] = unpack('<I',PartitionTable[Offset+12:Offset+16])[0]
   PartitionList[i]['EndingSector'] = ((PartitionList[i]['PartitionSize']) + (PartitionList[i]['StartingSector']) -1)
   PartitionList[i]['StartingOffset'] = PartitionList[i]['StartingSector'] * 512
   PartitionList[i]['EndOffset'] = PartitionList[i]['EndingSector'] * 512
   PartitionList[i]['SectorSize'] = 512
   PartitionList[i]['qnx6'] = False
   PartitionID = ord(PartitionList[i]['PartitionType'])
   if PartitionID == 0x05 or PartitionID == 0x0F:
    print "[-] (EBR) Extended Boot Record Detected, Processing...."
    # NOTE(review): StartingSector is a sector count but `offset` is a byte
    # offset; this recursion likely needs StartingSector * 512 -- verify
    # against an image with an extended partition.
    Parts = self.parsePartitionMBR(fileIO, offset + PartitionList[i]['StartingSector'], blockSize)
    for partID in range(0, len(Parts)):
     PartitionList[len(PartitionList) + 1] = Parts[partID]
   elif ( PartitionID == 0xB1 or PartitionID == 0xB2 or PartitionID == 0xB3 or PartitionID == 0xB4 ):
    print "[+] Supported QNX6FS Partition Detected @",format(PartitionList[i]['StartingOffset'],"02x")
    PartitionList[i]['qnx6'] = True
   elif ( PartitionID == 0x4D or PartitionID == 0x4E or PartitionID == 0x4F ):
    print "[X] Unsupported QNX4FS Partition Detected @",format(PartitionList[i]['StartingOffset'],"02x")
   else:
    print format(PartitionID,"02x")
    print PartitionList[i]
  return PartitionList;
 def parseNoPartitionQNX(self, fileIO, offset, blockSize=512):
  """Synthesize a single-entry partition list for an image with no MBR.

  The whole image is treated as one QNX6 (type 0xB1) partition starting
  at offset 0, mirroring the shape produced by parsePartitionMBR.
  """
  fileIO.seek(offset,0); #absolute from start of file
  DataBlock = fileIO.read(blockSize)
  PartitionTable = DataBlock[446:510]
  PartitionList={}
  i=0
  Offset= 0;
  PartitionList[i]={}
  PartitionList[i]['BootIndicator'] = 0
  PartitionList[i]['StartingCHS'] = 0
  PartitionList[i]['PartitionType'] = chr(0xB1)+""
  PartitionList[i]['EndingCHS'] = 0
  PartitionList[i]['StartingSector'] = 0
  PartitionList[i]['PartitionSize'] = 0
  PartitionList[i]['EndingSector'] = 0
  PartitionList[i]['StartingOffset'] = 0
  PartitionList[i]['EndOffset'] = 0
  PartitionList[i]['SectorSize'] = 512
  PartitionList[i]['qnx6'] = True
  PartitionID = ord(PartitionList[i]['PartitionType'])
  print "[+] Supported QNX6FS Partition Detected @",format(PartitionList[i]['StartingOffset'],"02x")
  return PartitionList;
 def ParseQNX(self, Partition, PartitionID):
  """Parse one QNX6 partition end-to-end.

  Locates the two superblocks, picks the active one (the higher serial
  number wins), then parses the bitmap, long filenames and inode tree,
  and auto-extracts all files.
  """
  self.fileIO = open(self.TARGET_, "rb")
  Offset = self.QNX6_BOOTBLOCK_SIZE + Partition['StartingOffset']
  self.Offset = Offset - self.QNX6_BOOTBLOCK_SIZE # + self.QNX6_SUPERBLOCK_AREA
  ### We are not interested in the Boot block, so jump straight past it.
  self.fileIO.seek( ( Partition['StartingOffset'] + self.QNX6_BOOTBLOCK_SIZE ) , 0 )
  ## The superblock should only be 512bytes long.
  Data = self.fileIO.read( self.QNX6_SUPERBLOCK_SIZE )
  SuperBlock = self.parseQNX6SuperBlock(Data, Partition['StartingOffset'])
  self.SuperBlock = SuperBlock;
  ## If the blocksize != 512, then super block is longer, re-read the area.
  if (SuperBlock['blocksize'] != 512):
   self.fileIO.seek( ( Partition['StartingOffset'] + self.QNX6_BOOTBLOCK_SIZE ) , 0 )
   Data = self.fileIO.read( SuperBlock['blocksize'] )
   SuperBlock = self.parseQNX6SuperBlock(Data, Partition['StartingOffset'])
  if SuperBlock['magic'] == self.PARTITION_MAGIC['QNX6']:
   print " |---+ First SuperBlock Detected","( Serial:", SuperBlock['serial'],") @",format(Offset,"02x")
   ## The backup superblock sits immediately after the data area.
   BackupSuperBlock_Offset = Partition['StartingOffset'] + self.QNX6_SUPERBLOCK_AREA + self.QNX6_BOOTBLOCK_SIZE + ( SuperBlock['num_blocks'] * SuperBlock['blocksize'])
   self.fileIO.seek(BackupSuperBlock_Offset, 0)
   Data = self.fileIO.read( SuperBlock['blocksize'] )
   blkSuperBlock = self.parseQNX6SuperBlock(Data, Partition['StartingOffset'])
   if blkSuperBlock['magic'] == self.PARTITION_MAGIC['QNX6']:
    print " |---+ Second SuperBlock Detected ","( Serial:", blkSuperBlock['serial'],") @",format(BackupSuperBlock_Offset,"02x")
   ## The higher serial number marks the most recently written (active) superblock.
   if blkSuperBlock['serial'] < SuperBlock['serial']:
    SB = SuperBlock
    print " |---+ Using First SuperBlock as Active Block"
   else:
    SB = blkSuperBlock
    print " |---+ Using Second SuperBlock as Active Block"
   self.printSuperBlockInfo(SB)
   self.parseBitmap(SB)
   self.LongNames = self.parseLongFileNames(SB)
   #for i in self.LongNames:
   # print format(int(i),'02x'), self.LongNames[i]
   self.parseINODE(SB,PartitionID)
 def printSuperBlockInfo(self, SB):
  """Pretty-print selected fields of a parsed superblock dict."""
  print " |--- volumeID:\t", ("".join("%02x" % q for q in SB['volumeid'] )).upper()
  print " |--- checksums:\t", ("0x%0.8X" % SB['checksum'])
  print " |--- num_inodes:\t", SB['num_inodes']
  print " |--- num_blocks:\t", SB['num_blocks']
  print " |--- blocksize:\t", SB['blocksize']
  print " |--- blkoffset: \t", SB['blks_offset']
 def parseQNX6SuperBlock(self, sb, offset): # B = 8 , H = 16 , I = 32 , L = 32 , Q = 64
  """Unpack a raw QNX6 superblock buffer into a dict of named fields.

  The trailing four entries ('Inode', 'Bitmap', 'Longfile', 'Unknown') are
  the four 80-byte root nodes of the filesystem's block trees.
  """
  SB = {}
  SB['magic'] = unpack('<I', sb[:4])[0]
  # NOTE(review): the checksum is read big-endian ('>I') while every other
  # field is little-endian; checksum_calc is CRC32 over bytes 8..512 --
  # confirm byte order against the on-disk format before comparing the two.
  SB['checksum'] = (unpack('>I', sb[4:8])[0])
  SB['checksum_calc'] = zlib.crc32(sb[8:512],0) & 0xFFFFFFFF
  SB['serial'] = unpack('<Q', sb[8:16])[0]
  SB['ctime'] = unpack('<I', sb[16:20])[0]
  SB['atime'] = unpack('<I', sb[20:24])[0]
  SB['flags'] = unpack('<I', sb[24:28])[0]
  SB['version1'] = unpack('<H', sb[28:30])[0]
  SB['version2'] = unpack('<H', sb[30:32])[0]
  SB['volumeid'] = unpack('<16B', sb[32:48])
  SB['blocksize'] = unpack('<I', sb[48:52])[0]
  SB['num_inodes'] = unpack('<I', sb[52:56])[0]
  SB['free_inodes'] = unpack('<I', sb[56:60])[0]
  SB['num_blocks'] = unpack('<I', sb[60:64])[0]
  SB['free_blocks'] = unpack('<I', sb[64:68])[0]
  SB['allocgroup'] = unpack('<I', sb[68:72])[0]
  SB['Inode'] = self.parseQNX6RootNode(sb[72:152]) ##80bytes
  SB['Bitmap'] = self.parseQNX6RootNode(sb[152:232])
  SB['Longfile'] = self.parseQNX6RootNode(sb[232:312])
  SB['Unknown'] = self.parseQNX6RootNode(sb[312:392])
  ## data area starts after the reserved superblock area and the boot block
  SB['blks_offset'] = offset + self.QNX6_SUPERBLOCK_AREA + self.QNX6_BOOTBLOCK_SIZE;
  return SB
 def parseQNX6RootNode(self,rn):
  """Unpack an 80-byte qnx6 root node: size, 16 block pointers, level, mode."""
  RN = {}
  RN['size'] = unpack('<Q', rn[0:8])[0]
  RN['ptr'] = unpack('<16I', rn[8:72])
  RN['level'] = unpack('<B', rn[72:73])[0]
  RN['mode'] = unpack('<B', rn[73:74])[0]
  # NOTE(review): [0] keeps only the first of the six reserved bytes --
  # probably meant to keep the whole tuple (field appears unused downstream).
  RN['reserved'] = unpack('<6B', rn[74:80])[0]
  return RN
 def parseINODE(self,superBlock,PartitionID):
  """Walk the superblock's inode root node, build the inode and directory
  trees, and auto-extract every file found.

  NOTE(review): the extraction path literal is '.\\Extraced\\' (sic) while
  the banner printed below says 'Extracted' -- likely a typo in the path;
  not corrected here since it changes on-disk output locations.
  """
  print " |--+ Inode: Detected - Processing...."
  #print " |---Size:", superBlock['Inode']['size']
  #print " |---Level:", superBlock['Inode']['level']
  #print " |---Mode:", superBlock['Inode']['mode']
  self.INodeTree = {}
  self.DirTree = {}
  if (superBlock['Inode']['level'] > self.QNX6_PTR_MAX_LEVELS):
   print "[x] invalid Inode structure."
   return 0
  #print " |--+PTR: "
  ## root nodes carry up to 16 direct/indirect block pointers
  for n in range(0, 16):
   ptr = superBlock['Inode']['ptr'][n]
   if self.checkQNX6blkptr(ptr):
    ptr_ = (ptr*superBlock['blocksize'])+superBlock['blks_offset'];
    #print " |--",n," : ",format(ptr_,'02x')
    superBlock['Inodes'] = self.praseQNX6Inode(ptr,superBlock['Inode']['level'],superBlock['blocksize'],superBlock['blks_offset'])
  print "[-] Generating directory Listing && Auto Extracting Files to (.\\Extracted\\Partition"+str(PartitionID)+")"
  self.parseINodeDIRStruct(superBlock['blocksize'],superBlock['blks_offset'])
  for i in self.DirTree:
   self.dumpfile(i,'.\\Extraced\\',superBlock['blocksize'],superBlock['blks_offset'],PartitionID)
  #self.parseINodeDIRbyID(1,superBlock['blocksize'],superBlock['blks_offset'])
 def parseLongFileNames(self,superBlock):
  """Collect every long filename from the superblock's Longfile tree.

  Returns a dict mapping a running counter (starting at 1) to each name
  found via the Longfile root node's 16 pointers.
  """
  print " |--+ Longfile: Detected - Processing...."
  #print " |---Size:", superBlock['Longfile']['size']
  #print " |---Level:", superBlock['Longfile']['level']
  #print " |---Mode:", superBlock['Longfile']['mode']
  if (superBlock['Longfile']['level'] > self.QNX6_PTR_MAX_LEVELS):
   print " *invalid levels, too many*"
  #print " |---PTR: "
  longnames = []
  for n in range(0, 16):
   ptr = superBlock['Longfile']['ptr'][n]
   if self.checkQNX6blkptr(ptr):
    ptrB = (ptr*superBlock['blocksize'])+superBlock['blks_offset'];
    #print " |--",n,":",format(ptrB,'02x')
    longnames.append(self.parseQNX6LongFilename(ptr,superBlock['Longfile']['level'],superBlock['blocksize'],superBlock['blks_offset']))
  ##Make Dictionary with all Names and INode/PTRs
  count = 1
  Dict = {}
  for i in longnames:
   if i != None:
    for q in i:
     if q != None:
      Dict[count] = i[q]
      count = count + 1;
  return Dict
 def parseQNX6LongFilename(self,ptr_,level,blksize,blksOffset):
  """Recursively resolve a Longfile block pointer to its filename string(s).

  level == 0: the block holds a length-prefixed name; otherwise it holds up
  to 128 32-bit pointers to lower-level blocks.

  NOTE(review): only 512 bytes are read regardless of blksize, and the
  indirect branch unpacks exactly '<128I' (512 bytes) -- assumes 512-byte
  blocks here; verify against images with larger block sizes.
  """
  self.fileIO.seek((ptr_*blksize)+blksOffset)
  handle = self.fileIO.read(512)
  LogFilenameNode={}
  if level == 0:
   size = unpack('<H',handle[0:2])
   fname = unpack('<'+str(size[0])+'B',handle[2:size[0]+2])
   if size[0] > 0:
    LogFilenameNode[str(ptr_)] = str("".join("%c" % i for i in fname )).strip()
    return LogFilenameNode
   else:
    return None
  else:
   Pointers = unpack('<128I', handle)
   for i in range(0, 128):
    if (self.checkQNX6blkptr(Pointers[i]) != False):
     name = (self.parseQNX6LongFilename(Pointers[i],level-1,blksize,blksOffset))
     if name != None:
      if level >= 1:
       LogFilenameNode[str(Pointers[i])]=name[str(Pointers[i])]
      else:
       LogFilenameNode[str(Pointers[i])]=name
   return LogFilenameNode
 def praseQNX6Inode(self,ptr,level,blksize,blksOffset):
  """Recursively read inode blocks into self.INodeTree.

  level >= 1 blocks hold 32-bit pointers to lower-level blocks; level 0
  blocks hold packed 128-byte inode entries.

  NOTE(review): the method name is misspelled ("prase") but is called by
  parseINODE under this name, so it is kept for compatibility. The `/`
  divisions rely on Python 2 integer division.
  """
  ptr_=(ptr*blksize)+blksOffset
  if self.checkQNX6blkptr(ptr_) and ptr != 0xffffffff:
   self.fileIO.seek(ptr_)
   RawData = self.fileIO.read(blksize)
   if level >= 1:
    Pointers = unpack('<'+str(blksize/4)+'I', RawData)
    for i in range(0, (blksize/4)):
     if self.checkQNX6blkptr((Pointers[i]*blksize)+blksOffset):
      self.praseQNX6Inode(Pointers[i],level-1,blksize,blksOffset)
   else:
    inode_range = (blksize / 128)  # number of 128-byte inode entries per block
    for i in range(0,inode_range):
     try:
      item = self.parseQNX6InodeEntry(RawData[(i*(blksize/inode_range)):((i+1)*(blksize/inode_range))])
      self.INodeTree[len(self.INodeTree)+1] = item
     except:
      ## keep numbering consistent even when an entry fails to parse
      print i, len(self.INodeTree), format(ptr_,'02x'), format(ptr,'02x')
      self.INodeTree[len(self.INodeTree)+1] = None
      break
def parseQNX6InodeEntry(self, ie): #qnx6_inode_entry 128bytes
IE = {}
IE['size'] = unpack('<Q',ie[0:8])[0]
IE['uid'] = unpack('<I',ie[8:12])[0]
IE['gid'] = unpack('<I',ie[12:16])[0]
IE['ftime'] = unpack('<I',ie[16:20])[0]
IE['mtime'] = unpack('<I',ie[20:24])[0]
IE['atime'] = unpack('<I',ie[24:28])[0]
IE['ctime'] = unpack('<I',ie[28:32])[0]
###S_IFREG 0100000 S_IFDIR 040000 S_IRUSR 0400 S_IWUSR 0200 S_IXUSR 0100
IE['mode'] = unpack('<H',ie[32:34])[0]
IE['ext_mode'] = unpack('<H',ie[34:36])[0]
IE['block_ptr'] = unpack('<16I',ie[36:100])
IE['filelevels'] = unpack('<B',ie[100:101])[0]
IE['status'] = unpack('<B',ie[101:102])[0]
IE['unknown2'] = unpack('<2B',ie[102:104])
IE['zero2'] = unpack('<6I',ie[104:128])
return IE
    def parseINodeDIRStruct(self,blksize,blksOffset,INodeID=1):
        """Recursively populate self.DirTree starting at inode *INodeID*.

        Each recorded entry maps an entry's inode pointer to
        {'Name': ..., 'ROOT_INODE': parent-pointer} where the parent pointer
        is taken from the "." entry of the batch.
        Default INodeID=1 is the filesystem root.
        """
        InodeEntry = self.INodeTree[INodeID]
        ## Check INodeEntry Exists and is a directory type Inode;
        if InodeEntry != None and (self.InodeEntry_ISDIR(InodeEntry['mode'])):
            ## Parse all 16 pointers in InodeEntry and make batchlist
            PhysicalPTRs = []
            for pointer_index in InodeEntry['block_ptr']:
                ## Make sure pointer != 0xFFFFFFFF (unused slot marker)
                if pointer_index != 0xffffffff:
                    ## Calculate Physical Location.
                    PhysicalPTRs.append((pointer_index*blksize)+blksOffset)
            ## If we have atleast 1 valid pointer process it for Dirs and Files
            if len(PhysicalPTRs) > 0:
                objects = self.parseInodeDirBatch(PhysicalPTRs,blksize,blksOffset)
                ##find perant INode ID (. and .. will be same at root == 1)
                rootID = 0
                for i in objects:
                    if objects[i]['Name'] == ".":
                        rootID=objects[i]['PTR']
                        break;
                for i in objects:
                    obj = objects[i]
                    # Skip the self/parent entries; record everything else.
                    if obj['Name'] != ".." and obj['Name'] != ".":
                        self.DirTree[ obj['PTR'] ] = {'Name':obj['Name'],'ROOT_INODE':rootID}
                        ##Recursively Process all Dirs (pointer 1 is the root)
                        if obj['PTR'] > 1:
                            self.parseINodeDIRStruct(blksize,blksOffset,obj['PTR'])
    def parseInodeDirBatch(self,ptrs,blksize,blksOffset):
        """Decode the 32-byte directory entries of every physical block in *ptrs*.

        Returns a dict keyed "<ptr>-<index>" -> {'PTR', 'Length', 'Name'};
        entries whose inode pointer is 0 are skipped. Names longer than
        QNX6_SHORT_NAME_MAX are resolved through self.LongNames.

        NOTE: Python 2 only — blksize/32 relies on integer division.
        """
        DIR = {}
        for ptr in ptrs:
            self.fileIO.seek(ptr)
            RawData = self.fileIO.read(blksize)
            for i in range(0,(blksize/32)):
                raw = RawData[ i*32: ((i+1)*32) ]
                # First uint32 is the inode pointer; 0 means unused slot.
                if (unpack('<I', raw[0:4])[0] != 0):
                    DIR[str(ptr)+"-"+str(i)]={}
                    DIR[str(ptr)+"-"+str(i)]['PTR'] = unpack('<I', raw[0:4])[0]
                    DIR[str(ptr)+"-"+str(i)]['Length'] = unpack('<B', raw[4:5])[0]
                    if DIR[str(ptr)+"-"+str(i)]['Length'] <= self.QNX6_SHORT_NAME_MAX:
                        # Short name: 27 inline bytes, NUL padding stripped.
                        DIR[str(ptr)+"-"+str(i)]['Name'] = "".join("%c" % i for i in unpack('<27B', raw[5:32] ) ).replace("\x00","")
                    else:
                        # Long name: bytes 5..9 read BIG-endian, +1 offset into
                        # self.LongNames. NOTE(review): endianness/offset look
                        # empirical (cf. the commented alternative) — confirm.
                        #print format(ptr,'02x'), "---" , format((ptr * blksize ) + blksOffset,'02x') , unpack('>I', raw[12:16])[0]
                        DIR[str(ptr)+"-"+str(i)]['Name'] = self.LongNames[unpack('>I', raw[5:9])[0]+1] #self.LongNames[unpack('<I', raw[12:16])[0]]
        return DIR
    def parseINodeDIRbyID(self,INodeID,blksize,blksOffset,level=0):
        """Pretty-print the directory tree rooted at inode *INodeID*.

        *level* controls the indentation of the printed listing; recursion
        only descends into entries with pointer > 2 to avoid re-walking the
        root and self/parent links.

        NOTE: Python 2 only (print statements).
        """
        InodeEntry = self.INodeTree[INodeID]
        ##print InodeEntry
        if (InodeEntry != None) and (self.InodeEntry_ISDIR(InodeEntry['mode'])):
            # Visit all 16 direct block pointers of the directory inode.
            for q in range(0,16):
                if InodeEntry['block_ptr'][q] != 0xffffffff:
                    _ptr=(InodeEntry['block_ptr'][q]*blksize)+blksOffset
                    ##print INodeID, "++", format(_ptr,'02x')
                    DIRS = self.parseInodeDir(_ptr,blksize,blksOffset)
                    root = 0
                    for idir in DIRS:
                        # Only well-formed short-name entries (1..27 chars).
                        if (DIRS[idir]['Length'] > 0) and (DIRS[idir]['Length'] < 28):
                            if DIRS[idir]['Name'] != "." and DIRS[idir]['Name'] != "..":
                                print (" "*level),"+-",DIRS[idir]['Name'] #, " -- " , DIRS[idir]['PTR']
                            elif DIRS[idir]['Name'] == "..":
                                root = DIRS[idir]['PTR'];
                            if DIRS[idir]['PTR'] != INodeID and DIRS[idir]['Name'] != "." and DIRS[idir]['Name'] != ".." and DIRS[idir]['PTR'] > 2 :
                                self.parseINodeDIRbyID(DIRS[idir]['PTR'],blksize,blksOffset,level+1)
def parseInodeDir(self,ptr,blksize,blksOffset):
self.fileIO.seek(ptr)
RawData = self.fileIO.read(blksize)
DIR = {}
for i in range(0,(blksize/32)):
raw = RawData[ i*32: ((i+1)*32) ]
DIR[i]={}
DIR[i]['PTR'] = unpack('<I', raw[0:4])[0]
DIR[i]['Length'] = unpack('<B', raw[4:5])[0]
DIR[i]['Name'] = "".join("%c" % i for i in unpack('<27B', raw[5:32] ) ).replace("\x00","")
return DIR
def checkQNX6blkptr(self, ptr):
mask = ( 1 << ptr.bit_length()) -1
return (ptr ^ mask == 0) == False
def InodeEntry_ISDIR(self,mode):
return ((mode & 040000) == 040000)
def InodeEntry_ISREG(self,mode):
return ((mode & 0100000) == 0100000)
def InodeEntry_ISLNK(self,mode):
return ((mode & 0120000) == 0120000)
def parseBitmap(self,superBlock):
self.Bitmaps = {}
print " |--+ Bitmap: Detected - Processing.... (using fast mode, this will still take a while.)"
#print " |---Size:", superBlock['Bitmap']['size']
#print " |---Level:", superBlock['Bitmap']['level']
#print " |---Mode:", superBlock['Bitmap']['mode']
if (superBlock['Bitmap']['level'] > self.QNX6_PTR_MAX_LEVELS):
print " *invalid levels, too many*"
#print " |--+PTR: "
for n in range(0, 16):
ptr = superBlock['Bitmap']['ptr'][n]
if self.checkQNX6blkptr(ptr):
ptr_ = (ptr*superBlock['blocksize'])+superBlock['blks_offset'];
#print " |--",n," : ",format(ptr_,'02x')
self.praseQNX6Bitmap(ptr,superBlock['Bitmap']['level'],superBlock['blocksize'],superBlock['blks_offset'])
#if len(self.Bitmaps) > 0:
# for i in range(1,len(self.Bitmaps)):
# print format(self.Bitmaps[i]['PTR'],'02x')
dcount = 0;
count = 0;
if len(self.Bitmaps) > 0:
count = 0;
for i in range(1,len(self.Bitmaps)):
Data = self.Bitmaps[i]['DATA']
for byte in Data:
if ord(byte) > 0:
for ii in range(0,7):
bit = ((ord(byte) >> ii) & 00000001 )
#print bit,
#sys.stdout.write(str(bit))
#sys.stdout.flush()
if bit == 0:
if self.isBlockEmpty(count,superBlock['blocksize'],superBlock['blks_offset']) == False:
dcount = dcount + 1;
PhysicalPTR=((count)*superBlock['blocksize'])+superBlock['blks_offset']
snip=self.getSnippet(count,superBlock['blocksize'],superBlock['blks_offset'])
#print " |---Deleted Data @:", format(PhysicalPTR,'02x') , "(",snip,")"
count | |
<filename>scicopia/tests/test_flask_db.py<gh_stars>0
import pytest
import time
from flask import current_app, g, session, request
from werkzeug.security import generate_password_hash
from scicopia.tests.data.initFlaskData import main as init
init()#doc=True,elastic=True,user=True
import scicopia.app.db as db
from scicopia.app import create_app
from scicopia.app.main import main
@pytest.fixture
def app():
    """Yield a Flask application configured for testing to each test."""
    yield create_app('testing')
# link(texts: List[str])

def _assert_link(texts, expected):
    """Shared body of the link_* routes: compare db.link output to *expected*."""
    assert db.link(texts) == expected
    return "0"

def _post(app, path):
    """POST *path* with a fresh test client so the route's asserts execute."""
    with app.test_client() as c:
        c.post(path)

@main.route("/test_link_https", methods=["GET", "POST"])
def link_https():
    return _assert_link(
        ['Test3: https://en.wikipedia.org more text.', 'Text without link.'],
        ['Test3: <a href="https://en.wikipedia.org">https://en.wikipedia.org</a> more text.', 'Text without link.'],
    )

def test_link_https(app):
    _post(app, '/test_link_https')

@main.route("/test_link_https_long", methods=["GET", "POST"])
def link_https_long():
    return _assert_link(
        ['Test2: https://https://en.wikipedia.org/wiki/Main_Page', 'Text without link.'],
        ['Test2: <a href="https://https://en.wikipedia.org/wiki/Main_Page">https://https://en.wikipedia.org/wiki/Main_Page</a>', 'Text without link.'],
    )

def test_link_https_long(app):
    _post(app, '/test_link_https_long')

@main.route("/test_link_http", methods=["GET", "POST"])
def link_http():
    return _assert_link(
        ['Test4: http://en.wikipedia.org more text.', 'Text without link.'],
        ['Test4: <a href="http://en.wikipedia.org">http://en.wikipedia.org</a> more text.', 'Text without link.'],
    )

def test_link_http(app):
    _post(app, '/test_link_http')

@main.route("/test_link_wrong_http", methods=["GET", "POST"])
def link_wrong_http():
    # A malformed scheme ("https:/") must be left untouched.
    return _assert_link(
        ['Test6: https:/en.wikipedia.org more text.', 'Text without link.'],
        ['Test6: https:/en.wikipedia.org more text.', 'Text without link.'],
    )

def test_link_wrong_http(app):
    _post(app, '/test_link_wrong_http')

@main.route("/test_link_nohttp", methods=["GET", "POST"])
def link_nohttp():
    # Bare domains are not turned into anchors.
    return _assert_link(
        ['Test1: wikipedia.org', 'Text without link.'],
        ['Test1: wikipedia.org', 'Text without link.'],
    )

def test_link_nohttp(app):
    _post(app, '/test_link_nohttp')

@main.route("/test_link_www", methods=["GET", "POST"])
def link_www():
    # www-prefixed domains without a scheme are also left untouched.
    return _assert_link(
        ['Test5: www.en.wikipedia.org more text.', 'Text without link.'],
        ['Test5: www.en.wikipedia.org more text.', 'Text without link.'],
    )

def test_link_www(app):
    _post(app, '/test_link_www')
# analyze_input(input: str)

def _assert_analyze(query, expected):
    """Shared body of the analyze_input_* routes: compare parser output."""
    assert db.analyze_input(query) == expected
    return "0"

def _post(app, path):
    """POST *path* with a fresh test client so the route's asserts execute."""
    with app.test_client() as c:
        c.post(path)

@main.route("/test_analyze_input_must_word", methods=["GET", "POST"])
def analyze_input_must_word():
    return _assert_analyze("lorem", {'must': [{'multi_match': {'query': 'lorem'}}], 'must_not': []})

def test_analyze_input_must_word(app):
    _post(app, '/test_analyze_input_must_word')

@main.route("/test_analyze_input_must_phrase", methods=["GET", "POST"])
def analyze_input_must_phrase():
    return _assert_analyze("'lorem'", {'must': [{'multi_match': {'query': 'lorem', 'type': 'phrase'}}], 'must_not': []})

def test_analyze_input_must_phrase(app):
    _post(app, '/test_analyze_input_must_phrase')

@main.route("/test_analyze_input_must_words", methods=["GET", "POST"])
def analyze_input_must_words():
    return _assert_analyze("lorem ipsum", {'must': [{'multi_match': {'query': 'lorem'}}, {'multi_match': {'query': 'ipsum'}}], 'must_not': []})

def test_analyze_input_must_words(app):
    _post(app, '/test_analyze_input_must_words')

@main.route("/test_analyze_input_must_phrase2", methods=["GET", "POST"])
def analyze_input_must_phrase2():
    return _assert_analyze("'lorem ipsum'", {'must': [{'multi_match': {'query': 'lorem ipsum', 'type': 'phrase'}}], 'must_not': []})

def test_analyze_input_must_phrase2(app):
    _post(app, '/test_analyze_input_must_phrase2')

@main.route("/test_analyze_input_must_word_title", methods=["GET", "POST"])
def analyze_input_must_word_title():
    return _assert_analyze("titel:lorem ipsum", {'must': [{'match': {'titel': 'lorem'}}, {'multi_match': {'query': 'ipsum'}}], 'must_not': []})

def test_analyze_input_must_word_title(app):
    _post(app, '/test_analyze_input_must_word_title')

@main.route("/test_analyze_input_must_title_phrase2", methods=["GET", "POST"])
def analyze_input_must_title_phrase2():
    return _assert_analyze("titel:'lorem ipsum'", {'must': [{'match_phrase': {'titel': 'lorem ipsum'}}], 'must_not': []})

def test_analyze_input_must_title_phrase2(app):
    _post(app, '/test_analyze_input_must_title_phrase2')

@main.route("/test_analyze_input_mustnot_word", methods=["GET", "POST"])
def analyze_input_mustnot_word():
    return _assert_analyze("-lorem", {'must': [], 'must_not': [{'multi_match': {'query': 'lorem'}}]})

def test_analyze_input_mustnot_word(app):
    _post(app, '/test_analyze_input_mustnot_word')

@main.route("/test_analyze_input_mustnot_phrase", methods=["GET", "POST"])
def analyze_input_mustnot_phrase():
    # adding of restrictions to conditions is a known, unsolved, bug
    return _assert_analyze("-'lorem'", {'must': [], 'must_not': [{'multi_match': {'query': 'lorem', 'type': 'phrase'}}]})

def test_analyze_input_mustnot_phrase(app):
    _post(app, '/test_analyze_input_mustnot_phrase')

@main.route("/test_analyze_input_must_word_mustnot_word", methods=["GET", "POST"])
def analyze_input_must_word_mustnot_word():
    return _assert_analyze("-lorem ipsum", {'must': [{'multi_match': {'query': 'ipsum'}}], 'must_not': [{'multi_match': {'query': 'lorem'}}]})

def test_analyze_input_must_word_mustnot_word(app):
    _post(app, '/test_analyze_input_must_word_mustnot_word')

@main.route("/test_analyze_input_mustnot_phrase2", methods=["GET", "POST"])
def analyze_input_mustnot_phrase2():
    # adding of restrictions to conditions is a known, unsolved, bug
    return _assert_analyze("-'lorem ipsum'", {'must': [], 'must_not': [{'multi_match': {'query': 'lorem ipsum', 'type': 'phrase'}}]})

def test_analyze_input_mustnot_phrase2(app):
    _post(app, '/test_analyze_input_mustnot_phrase2')

@main.route("/test_analyze_input_must_word_mustnot_title_word", methods=["GET", "POST"])
def analyze_input_must_word_mustnot_title_word():
    # adding of restrictions to conditions is a known, unsolved, bug
    return _assert_analyze("-titel:lorem ipsum", {'must': [{'multi_match': {'query': 'ipsum'}}], 'must_not': [{'match': {'titel': 'lorem'}}]})

def test_analyze_input_must_word_mustnot_title_word(app):
    _post(app, '/test_analyze_input_must_word_mustnot_title_word')

@main.route("/test_analyze_input_mustnot_title_phrase2", methods=["GET", "POST"])
def analyze_input_mustnot_title_phrase2():
    # adding of restrictions to conditions is a known, unsolved, bug
    return _assert_analyze("-titel:'lorem ipsum'", {'must': [], 'must_not': [{'match_phrase': {'titel': 'lorem ipsum'}}]})

def test_analyze_input_mustnot_title_phrase2(app):
    _post(app, '/test_analyze_input_mustnot_title_phrase2')

@main.route("/test_analyze_input_must_query_word", methods=["GET", "POST"])
def analyze_input_must_query_word():
    return _assert_analyze("query:lorem", {'must': [{'match': {'query': 'lorem'}}], 'must_not': []})

def test_analyze_input_must_query_word(app):
    _post(app, '/test_analyze_input_must_query_word')

@main.route("/test_analyze_input_must_query_phrase2", methods=["GET", "POST"])
def analyze_input_must_query_phrase2():
    return _assert_analyze("query:'lorem ipsum'", {'must': [{'match_phrase': {'query': 'lorem ipsum'}}], 'must_not': []})

def test_analyze_input_must_query_phrase2(app):
    _post(app, '/test_analyze_input_must_query_phrase2')

@main.route("/test_analyze_input_must_quer_word", methods=["GET", "POST"])
def analyze_input_must_quer_word():
    return _assert_analyze("quer:lorem", {'must': [{'match': {'quer': 'lorem'}}], 'must_not': []})

def test_analyze_input_must_quer_word(app):
    _post(app, '/test_analyze_input_must_quer_word')

@main.route("/test_analyze_input_must_ta_word", methods=["GET", "POST"])
def analyze_input_must_ta_word():
    return _assert_analyze("ta:lorem", {'must': [{'match': {'ta': 'lorem'}}], 'must_not': []})

def test_analyze_input_must_ta_word(app):
    _post(app, '/test_analyze_input_must_ta_word')
# checkFields(condition, fields)

def _assert_fields(condition, expected):
    """Shared body of the checkFields_* routes.

    Builds a fresh standard field list per call (checkFields may rewrite
    aliases like 'titel' -> 'title' in place).
    """
    fields = ['author', 'title', 'abstract', 'tags']
    assert db.checkFields(condition, fields) == expected
    return "0"

def _post(app, path):
    """POST *path* with a fresh test client so the route's asserts execute."""
    with app.test_client() as c:
        c.post(path)

@main.route("/test_checkFields_word", methods=["GET", "POST"])
def checkFields_word():
    return _assert_fields({'multi_match': {'query': 'lorem'}},
                          ({'multi_match': {'query': 'lorem'}}, False))

def test_checkFields_word(app):
    _post(app, '/test_checkFields_word')

@main.route("/test_checkFields_phrase", methods=["GET", "POST"])
def checkFields_phrase():
    return _assert_fields({'multi_match': {'query': 'lorem', 'type': 'phrase'}},
                          ({'multi_match': {'query': 'lorem', 'type': 'phrase'}}, False))

def test_checkFields_phrase(app):
    _post(app, '/test_checkFields_phrase')

@main.route("/test_checkFields_title_word", methods=["GET", "POST"])
def checkFields_title_word():
    return _assert_fields({'match': {'title': 'lorem'}},
                          ({'match': {'title': 'lorem'}}, False))

def test_checkFields_title_word(app):
    _post(app, '/test_checkFields_title_word')

@main.route("/test_checkFields_titel_word", methods=["GET", "POST"])
def checkFields_titel_word():
    # 'titel' is an alias that must be normalized to 'title'.
    return _assert_fields({'match': {'titel': 'lorem'}},
                          ({'match': {'title': 'lorem'}}, False))

def test_checkFields_titel_word(app):
    _post(app, '/test_checkFields_titel_word')

@main.route("/test_checkFields_titel_phrase2", methods=["GET", "POST"])
def checkFields_titel_phrase2():
    return _assert_fields({'match_phrase': {'titel': 'lorem ipsum'}},
                          ({'match_phrase': {'title': 'lorem ipsum'}}, False))

def test_checkFields_titel_phrase2(app):
    _post(app, '/test_checkFields_titel_phrase2')

@main.route("/test_checkFields_tag_word", methods=["GET", "POST"])
def checkFields_tag_word():
    # 'tag' queries become terms lookups and also report the tag value.
    return _assert_fields({'match': {'tag': 'lorem'}},
                          ({'terms': {'tags': ['lorem']}}, "lorem"))

def test_checkFields_tag_word(app):
    _post(app, '/test_checkFields_tag_word')

@main.route("/test_checkFields_tag_phrase2", methods=["GET", "POST"])
def checkFields_tag_phrase2():
    return _assert_fields({'match_phrase': {'tag': 'lorem ipsum'}},
                          ({'terms': {'tags': ['lorem ipsum']}}, "lorem ipsum"))

def test_checkFields_tag_phrase2(app):
    _post(app, '/test_checkFields_tag_phrase2')

@main.route("/test_checkFields_query_word", methods=["GET", "POST"])
def checkFields_query_word():
    # Unknown field 'query' falls back to a multi_match over all fields.
    return _assert_fields({'match': {'query': 'lorem'}},
                          ({'multi_match': {'query': 'lorem'}}, False))

def test_checkFields_query_word(app):
    _post(app, '/test_checkFields_query_word')

@main.route("/test_checkFields_query_phrase2", methods=["GET", "POST"])
def checkFields_query_phrase2():
    return _assert_fields({'match_phrase': {'query': 'lorem ipsum'}},
                          ({'multi_match': {'query': 'lorem ipsum', 'type': 'phrase'}}, False))

def test_checkFields_query_phrase2(app):
    _post(app, '/test_checkFields_query_phrase2')

@main.route("/test_checkFields_quer_phrase", methods=["GET", "POST"])
def checkFields_quer_phrase():
    return _assert_fields({'match': {'quer': 'lorem'}},
                          ({'multi_match': {'query': 'lorem'}}, False))

def test_checkFields_quer_phrase(app):
    _post(app, '/test_checkFields_quer_phrase')

@main.route("/test_checkFields_ta_word", methods=["GET", "POST"])
def checkFields_ta_word():
    return _assert_fields({'match': {'ta': 'lorem'}},
                          ({'multi_match': {'query': 'lorem'}}, False))

def test_checkFields_ta_word(app):
    _post(app, '/test_checkFields_ta_word')
# newsearch()

@main.route("/test_newsearch", methods=["GET", "POST"])
def newsearch():
    """Run db.newsearch inside a request context; assertions live in the tests."""
    db.newsearch()
    return "0"

def test_newsearch_must_word(app):
    with app.test_client() as c:
        with c.session_transaction() as sess:
            # Input: "lorem"
            sess["condition"] = {
                'must': [{'multi_match': {'query': 'lorem'}}],
                'must_not': [],
            }
        c.post('/test_newsearch')
        assert session["query"] == "'lorem'"
def test_newsearch_must_phrase(app):
with app.test_client() as c:
with c.session_transaction() as sess:
# Input: "'lorem'"
sess["condition"] = {}
sess['condition']['must'] = [{'multi_match': {'query': 'lorem', 'type': 'phrase'}}]
| |
if customcpunumber:
# TODO: This is probably not correct, fix it
cmd['details[0]["cpuNumber"]'] = customcpunumber
if customcpuspeed:
# TODO: This is probably not correct, fix it
cmd['details[0]["cpuSpeed"]'] = customcpuspeed
if custommemory:
# TODO: This is probably not correct, fix it
cmd['details[0]["memory"]'] = custommemory
if rootdisksize >= 0:
cmd['details[0]["rootdisksize"]'] = rootdisksize
if group:
cmd['group'] = group
# program default access to ssh
if mode.lower() == 'basic':
cls.ssh_access_group(api_client, cmd)
virtual_machine = VirtualMachine(api_client.deployVirtualMachine(method=method, **cmd), services, api_client=api_client)
waitforjob(api_client, virtual_machine.jobid)
virtual_machine = cls.list(api_client=api_client, method=method, id=virtual_machine.id)[0]
# virtual_machine['ssh_ip'] = virtual_machine['nic'][0]['ipaddress']
cls.__ssh_ip = virtual_machine.nic[0].ipaddress
if startvm is False:
# virtual_machine['public_ip'] = virtual_machine['nic'][0]['ipaddress']
cls.__public_ip = virtual_machine.nic[0].ipaddress
# return VirtualMachine(virtual_machine, services)
return virtual_machine
# program ssh access over NAT via PF
if mode.lower() == 'advanced':
cls.access_ssh_over_nat(
api_client,
services,
virtual_machine,
allow_egress=allow_egress,
networkid=cmd['networkids'][0] if cmd['networkids'] else None)
elif mode.lower() == 'basic':
if cls.__publicip is not None:
vm_ssh_ip = virtual_machine.publicip
else:
# regular basic zone with security group
vm_ssh_ip = virtual_machine.nic[0].ipaddress
# virtual_machine['ssh_ip'] = vm_ssh_ip
cls.__ssh_ip = vm_ssh_ip
# virtual_machine['public_ip'] = vm_ssh_ip
cls.__public_ip = vm_ssh_ip
# return VirtualMachine(virtual_machine, services)
return virtual_machine
def start(self, api_client):
"""Start the instance"""
cmd = {'id': self.id}
api_client.startVirtualMachine(**cmd)
response = self.validateState(api_client, RUNNING)
if response[0] == FAIL:
raise Exception(response[1])
return
def stop(self, api_client, forced=None):
"""Stop the instance"""
cmd = {'id': self.id}
if forced:
cmd['forced'] = forced
api_client.stopVirtualMachine(**cmd)
response = self.validateState(api_client, STOPPED)
if response[0] == FAIL:
raise Exception(response[1])
return
def reboot(self, api_client):
"""Reboot the instance"""
cmd = {'id': self.id}
api_client.rebootVirtualMachine(**cmd)
response = self.validateState(api_client, RUNNING)
if response[0] == FAIL:
raise Exception(response[1])
def recover(self, api_client):
"""Recover the instance"""
cmd = {'id': self.id}
api_client.recoverVirtualMachine(**cmd)
response = self.validateState(api_client, STOPPED)
if response[0] == FAIL:
raise Exception(response[1])
def restore(self, api_client, templateid=None):
"""Restore the instance"""
cmd = {'virtualmachineid': self.id}
if templateid:
cmd['templateid'] = templateid
return api_client.restoreVirtualMachine(**cmd)
def get_ssh_client(
self, ipaddress=None, reconnect=False, port=None,
keyPairFileLocation=None, retries=20, retryinterv=30, timeout=10.0):
"""Get SSH object of VM"""
# If NAT Rules are not created while VM deployment in Advanced mode
# then, IP address must be passed
if ipaddress is not None:
self.__ssh_ip = ipaddress
if port:
self.ssh_port = port
if keyPairFileLocation is not None:
self.password = <PASSWORD>
if reconnect:
self.ssh_client = is_server_ssh_ready(
self.__ssh_ip,
self.ssh_port,
self.username,
self.password,
retries=retries,
retry_interval=retryinterv,
timeout=timeout,
key_pair_file_location=keyPairFileLocation
)
self.ssh_client = self.ssh_client or is_server_ssh_ready(
self.__ssh_ip,
self.ssh_port,
self.username,
self.password,
retries=retries,
retry_interval=retryinterv,
timeout=timeout,
key_pair_file_location=keyPairFileLocation
)
return self.ssh_client
def test_ssh_connectivity(self, retries=2, expect_connection=True, retryinterv=None, timeout=None):
got_connection = False
try:
self.get_ssh_client(reconnect=True, retries=retries, retryinterv=retryinterv, timeout=timeout)
got_connection = True
except Exception as e:
if expect_connection:
raise Exception("Exception: %s" % e)
return expect_connection == got_connection
    def validateState(self, api_client, state, timeout=600, interval=5):
        """List this VM and wait until its state matches *state*.

        Polls through the module-level validate_state helper every *interval*
        seconds for at most *timeout* seconds.

        @returnValue - List[Result, Reason]
        1) Result - FAIL if there is any exception
        in the operation or VM state does not change
        to expected state in given time else PASS
        2) Reason - Reason for failure"""
        return validate_state(api_client, self, state, timeout, interval)
@staticmethod
def state_check_function(objects, state):
return str(objects[0].state).lower().decode("string_escape") == str(state).lower()
def resetSshKey(self, api_client, **kwargs):
"""Resets SSH key"""
cmd = {'id': self.id}
cmd.update(kwargs)
return api_client.resetSSHKeyForVirtualMachine(**cmd)
def update(self, api_client, **kwargs):
"""Updates the VM data"""
cmd = {'id': self.id}
cmd.update(kwargs)
return api_client.updateVirtualMachine(**cmd)
def delete(self, api_client, expunge=True, **kwargs):
"""Destroy an Instance"""
cmd = {'id': self.id, 'expunge': expunge, 'fetch_result': True}
cmd.update(kwargs)
api_client.destroyVirtualMachine(**cmd)
def expunge(self, api_client):
"""Expunge an Instance"""
cmd = {'id': self.id}
api_client.expungeVirtualMachine(**cmd)
    def migrate(self, api_client, hostid=None):
        """Live-migrate this instance, optionally to a specific *hostid*.

        Returns a new VirtualMachine wrapper built from the 'virtualmachine'
        payload of the API response.
        NOTE(review): other construction sites pass (data, services,
        api_client=...); here only the response dict is given — confirm the
        constructor tolerates the missing arguments.
        """
        cmd = {'virtualmachineid': self.id, 'fetch_result': True}
        if hostid:
            cmd['hostid'] = hostid
        return VirtualMachine(api_client.migrateVirtualMachine(**cmd).get('virtualmachine'))
def migrate_vm_with_volume(self, api_client, hostid=None, migrateto=None):
"""migrate an Instance and its volumes"""
cmd = {'virtualmachineid': self.id}
if hostid:
cmd['hostid'] = hostid
if migrateto:
migrateto = []
for volume, pool in migrateto.items():
cmd.migrateto.append({
'volume': volume,
'pool': pool
})
api_client.migrateVirtualMachineWithVolume(**cmd)
def attach_volume(self, api_client, volume, deviceid=None):
"""Attach volume to instance"""
cmd = {'id': volume.id, 'virtualmachineid': self.id}
if deviceid is not None:
cmd['deviceid'] = deviceid
return api_client.attachVolume(**cmd)
@staticmethod
def detach_volume(api_client, volume):
"""Detach volume to instance"""
cmd = {'id': volume.id}
return api_client.detachVolume(**cmd)
def add_nic(self, api_client, networkId, ipaddress=None):
"""Add a NIC to a VM"""
cmd = {'virtualmachineid': self.id, 'networkid': networkId}
if ipaddress:
cmd['ipaddress'] = ipaddress
return api_client.addNicToVirtualMachine(**cmd)
def remove_nic(self, api_client, nicId):
"""Remove a NIC to a VM"""
cmd = {'nicid': nicId, 'virtualmachineid': self.id}
return api_client.removeNicFromVirtualMachine(**cmd)
def update_default_nic(self, api_client, nicId):
"""Set a NIC to be the default network adapter for a VM"""
cmd = {'nicid': nicId, 'virtualmachineid': self.id}
return api_client.updateDefaultNicForVirtualMachine(**cmd)
def attach_iso(self, api_client, iso):
"""Attach ISO to instance"""
cmd = {'id': iso.id, 'virtualmachineid': self.id}
return api_client.attachIso(**cmd)
def detach_iso(self, api_client):
"""Detach ISO to instance"""
cmd = {'virtualmachineid': self.id}
return api_client.detachIso(**cmd)
def scale_virtualmachine(self, api_client, serviceOfferingId):
""" Scale up of service offering for the Instance"""
cmd = {'id': self.id, 'serviceofferingid': serviceOfferingId}
return api_client.scaleVirtualMachine(**cmd)
def change_service_offering(self, api_client, serviceOfferingId):
"""Change service offering of the instance"""
cmd = {'id': self.id, 'serviceofferingid': serviceOfferingId}
return api_client.changeServiceForVirtualMachine(**cmd)
    @classmethod
    def list(cls, api_client, **kwargs):
        """List all VMs matching criteria.

        Forces listall=True when both 'account' and 'domainid' filters are
        supplied, then wraps each 'virtualmachine' record of the API response
        through the superclass list helper.
        """
        cmd = {}
        cmd.update(kwargs)
        if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
            cmd['listall'] = True
        return super(VirtualMachine, cls).list(api_client.listVirtualMachines(**cmd).get('virtualmachine'), api_client=api_client)
def resetPassword(self, api_client):
"""Resets VM password if VM created using password enabled template"""
cmd = {'id': self.id}
try:
response = api_client.resetPasswordForVirtualMachine(**cmd)
except Exception as e:
raise Exception("Reset Password failed! - %s" % e)
if response is not None:
return response.password
def assign_virtual_machine(self, api_client, account, domainid):
"""Move a user VM to another user under same domain."""
cmd = {'virtualmachineid': self.id, 'account': account, 'domainid': domainid}
try:
response = api_client.assignVirtualMachine(**cmd)
return response
except Exception as e:
raise Exception("assignVirtualMachine failed - %s" % e)
def update_affinity_group(self, api_client, affinitygroupids=None,
affinitygroupnames=None):
"""Update affinity group of a VM"""
cmd = {'id': self.id}
if affinitygroupids:
cmd['affinitygroupids'] = affinitygroupids
if affinitygroupnames:
cmd['affinitygroupnames'] = affinitygroupnames
return api_client.updateVMAffinityGroup(**cmd)
def scale(self, api_client, serviceOfferingId,
customcpunumber=None, customcpuspeed=None, custommemory=None):
"""Change service offering of the instance"""
cmd = {'id': self.id, 'serviceofferingid': serviceOfferingId, 'details': [{"cpuNumber": "", "cpuSpeed": "", "memory": ""}]}
if customcpunumber:
cmd['details[0]["cpuNumber"]'] = customcpunumber
if customcpuspeed:
cmd['details[0]["cpuSpeed"]'] = customcpuspeed
if custommemory:
cmd['details[0]["memory"]'] = custommemory
return api_client.scaleVirtualMachine(**cmd)
class Volume:
"""Manage Volume Life cycle
"""
    def __init__(self, items):
        """Expose every key of *items* (an API response mapping) as an attribute."""
        self.__dict__.update(items)
@classmethod
def create(cls, api_client, services, zoneid=None, account=None,
domainid=None, diskofferingid=None, projectid=None, size=None):
"""Create Volume"""
cmd = {'name': "-".join([services["diskname"], random_gen()])}
if diskofferingid:
cmd['diskofferingid'] = diskofferingid
elif "diskofferingid" in services:
cmd['diskofferingid'] = services["diskofferingid"]
if zoneid:
cmd['zoneid'] = zoneid
elif "zoneid" in services:
cmd['zoneid'] = services["zoneid"]
if account:
cmd['account'] = account
elif "account" in services:
cmd['account'] = services["account"]
if domainid:
cmd['domainid'] = domainid
elif "domainid" in services:
cmd['domainid'] = services["domainid"]
if projectid:
cmd['projectid'] = projectid
if size:
cmd['size'] = size
return Volume(api_client.createVolume(**cmd))
@classmethod
def create_custom_disk(cls, api_client, services, account=None,
domainid=None, diskofferingid=None):
"""Create Volume from Custom disk offering"""
cmd = {'name': services["diskname"]}
if diskofferingid:
cmd['diskofferingid'] = diskofferingid
elif "customdiskofferingid" in services:
cmd['diskofferingid'] = services["customdiskofferingid"]
cmd['size'] = services["customdisksize"]
cmd['zoneid'] = services["zoneid"]
if account:
cmd['account'] = account
else:
cmd['account'] = services["account"]
if domainid:
cmd['domainid'] = domainid
else:
cmd['domainid'] = services["domainid"]
return Volume(api_client.createVolume(**cmd))
@classmethod
def create_from_snapshot(cls, api_client, snapshot_id, services,
account=None, domainid=None):
"""Create Volume from snapshot"""
cmd = {'name': "-".join([services["diskname"], random_gen()]), 'snapshotid': snapshot_id, 'zoneid': services["zoneid"], 'size': services["size"]}
if services["ispublic"]:
cmd['ispublic'] = services["ispublic"]
else:
cmd['ispublic'] = False
if account:
cmd['account'] = account
else:
cmd['account'] = services["account"]
if domainid:
cmd['domainid'] = domainid
else:
cmd['domainid'] = services["domainid"]
return Volume(api_client.createVolume(**cmd))
def delete(self, api_client):
"""Delete Volume"""
cmd = {'id': self.id}
api_client.deleteVolume(**cmd)
@classmethod
def list(cls, api_client, **kwargs):
"""List all volumes matching criteria"""
cmd = {}
cmd.update(kwargs)
if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():
cmd['listall'] = True
return api_client.listVolumes(**cmd)
def resize(self, api_client, **kwargs):
"""Resize a volume"""
cmd = {'id': self.id}
cmd.update(kwargs)
return api_client.resizeVolume(**cmd)
@classmethod
def upload(cls, api_client, services, zoneid=None,
account=None, domainid=None, url=None, **kwargs):
"""Uploads the volume to specified account"""
cmd = {}
if zoneid:
cmd['zoneid'] = zoneid
if account:
cmd['account'] = account
if domainid:
cmd['domainid'] = domainid
cmd['format'] = services["format"]
cmd['name'] = services["diskname"]
if url:
cmd['url'] = url
else:
cmd['url'] = services["url"]
cmd.update(kwargs)
return Volume(api_client.uploadVolume(**cmd))
def wait_for_upload(self, api_client, timeout=10, interval=60):
"""Wait for upload"""
# Sleep to ensure template is in proper state before download
time.sleep(interval)
while True:
volume_response = Volume.list(
api_client,
id=self.id,
zoneid=self.zoneid,
)
if isinstance(volume_response, list):
volume = volume_response[0]
# If volume is ready,
# volume.state = Allocated
if volume.state == 'Uploaded':
break
elif 'Uploading' in | |
from dataset import *
import transforms
import time
import os
from networks import ResnetGenerator,Discriminator
from utils import denorm,tensor2numpy,RGB2BGR,cam,BCEWithLogitsLoss,RhoClipper
from paddle.fluid.layers import ones_like,zeros_like
import paddle.fluid as fluid
from paddle.fluid.dygraph import L1Loss,MSELoss,to_variable
import numpy as np
import cv2
import paddle
DATASET = "datasets"
A_TEST_LIST_FILE = "data/" + DATASET + "/testA.txt"
B_TEST_LIST_FILE = "data/" + DATASET + "/testB.txt"
IMAGES_ROOT = "data/" + DATASET + "/"
class UGATIT(object):
def __init__(self, args):
self.light = args.light
if self.light:
self.model_name = 'UGATIT_light'
else:
self.model_name = 'UGATIT'
self.result_dir = args.result_dir
self.dataset = args.dataset
self.iteration = args.iteration
self.decay_flag = args.decay_flag
self.batch_size = args.batch_size
self.print_freq = args.print_freq
self.save_freq = args.save_freq
self.lr = args.lr
self.weight_decay = args.weight_decay
self.ch = args.ch
""" Weight """
self.adv_weight = args.adv_weight
self.cycle_weight = args.cycle_weight
self.identity_weight = args.identity_weight
self.cam_weight = args.cam_weight
""" Generator """
self.n_res = args.n_res
""" Discriminator """
self.n_dis = args.n_dis
self.img_size = args.img_size
self.img_ch = args.img_ch
self.device = args.device
self.benchmark_flag = args.benchmark_flag
self.resume = args.resume
##################################################################################
# Model
##################################################################################
def optimizer_setting(self,parameters):
lr = 0.0001
optimizer = fluid.optimizer.Adam(
learning_rate=lr,
parameter_list=parameters,
beta1=0.5, beta2=0.999, regularization=fluid.regularizer.L2Decay(self.weight_decay))
return optimizer
def build_model(self):
""" DataLoader """
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize((self.img_size + 30, self.img_size + 30)),
transforms.RandomCrop(self.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=0.5, std=0.5)
])
test_transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=0.5, std=0.5)
])
self.trainA_loader = paddle.batch(a_reader(shuffle=True,transforms=train_transform), self.batch_size)()
self.trainB_loader = paddle.batch(b_reader(shuffle=True,transforms=train_transform), self.batch_size)()
self.testA_loader = a_test_reader(transforms=test_transform)
self.testB_loader = b_test_reader(transforms=test_transform)
""" Define Generator, Discriminator """
self.genA2B = ResnetGenerator(input_nc=3, output_nc=3, ngf=self.ch, n_blocks=self.n_res, img_size=self.img_size,
light=self.light)
self.genB2A = ResnetGenerator(input_nc=3, output_nc=3, ngf=self.ch, n_blocks=self.n_res, img_size=self.img_size,
light=self.light)
self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
""" Define Loss """
self.L1_loss = L1Loss()
self.MSE_loss = MSELoss()
self.BCE_loss = BCEWithLogitsLoss()
""" Trainer """
self.G_optim = self.optimizer_setting(self.genA2B.parameters() + self.genB2A.parameters())
self.D_optim = self.optimizer_setting(self.disGA.parameters() + self.disGB.parameters() + self.disLA.parameters() + self.disLB.parameters())
""" Define Rho clipper to constraint the value of rho in AdaILN and ILN"""
self.Rho_clipper = RhoClipper(0, 1)
    def train(self):
        """Main adversarial training loop.

        Alternates one discriminator update and one generator update per
        step, periodically writes sample grids and checkpoints. Relies on
        build_model() having been called first.
        """
        self.genA2B.train(), self.genB2A.train(), self.disGA.train(), self.disGB.train(), self.disLA.train(), self.disLB.train()
        start_iter = 1
        if self.resume:
            model_list = os.listdir(os.path.join(self.result_dir, self.dataset, 'model'))
            if not len(model_list) == 0:
                model_list.sort()
                # Checkpoint directories are named by step number.
                iter = int(model_list[-1])
                print("[*]load %d"%(iter))
                self.load(os.path.join(self.result_dir, self.dataset, 'model'), iter)
                print("[*] Load SUCCESS")
                # NOTE(review): start_iter stays 1 even after a resume, so
                # step numbering restarts — confirm whether start_iter should
                # be advanced to iter + 1.
        # training loop
        print('training start !')
        start_time = time.time()
        for step in range(start_iter, self.iteration + 1):
            # One sample per domain; batch dimension of 1 with hard-coded
            # 3x256x256 shape (assumes img_size == 256 — TODO confirm).
            real_A = next(self.trainA_loader)
            real_B = next(self.trainB_loader)
            real_A = np.array([real_A[0].reshape(3, 256, 256)]).astype("float32")
            real_B = np.array([real_B[0].reshape(3, 256, 256)]).astype("float32")
            real_A = to_variable(real_A)
            real_B = to_variable(real_B)
            # Update D: translate once, then score real and fake samples with
            # the global (G*) and local (L*) discriminators of each domain.
            fake_A2B, _, _ = self.genA2B(real_A)
            fake_B2A, _, _ = self.genB2A(real_B)
            real_GA_logit, real_GA_cam_logit, _ = self.disGA(real_A)
            real_LA_logit, real_LA_cam_logit, _ = self.disLA(real_A)
            real_GB_logit, real_GB_cam_logit, _ = self.disGB(real_B)
            real_LB_logit, real_LB_cam_logit, _ = self.disLB(real_B)
            fake_GA_logit, fake_GA_cam_logit, _ = self.disGA(fake_B2A)
            fake_LA_logit, fake_LA_cam_logit, _ = self.disLA(fake_B2A)
            fake_GB_logit, fake_GB_cam_logit, _ = self.disGB(fake_A2B)
            fake_LB_logit, fake_LB_cam_logit, _ = self.disLB(fake_A2B)
            # LSGAN-style losses: real -> 1, fake -> 0, on both the patch
            # logits and the CAM logits.
            D_ad_loss_GA = self.MSE_loss(real_GA_logit, ones_like(real_GA_logit)) + self.MSE_loss(fake_GA_logit,
                                                                                                  zeros_like(
                                                                                                      fake_GA_logit))
            D_ad_cam_loss_GA = self.MSE_loss(real_GA_cam_logit, ones_like(real_GA_cam_logit)) + self.MSE_loss(
                fake_GA_cam_logit, zeros_like(fake_GA_cam_logit))
            D_ad_loss_LA = self.MSE_loss(real_LA_logit, ones_like(real_LA_logit)) + self.MSE_loss(fake_LA_logit,
                                                                                                  zeros_like(
                                                                                                      fake_LA_logit))
            D_ad_cam_loss_LA = self.MSE_loss(real_LA_cam_logit, ones_like(real_LA_cam_logit)) + self.MSE_loss(
                fake_LA_cam_logit, zeros_like(fake_LA_cam_logit))
            D_ad_loss_GB = self.MSE_loss(real_GB_logit, ones_like(real_GB_logit)) + self.MSE_loss(fake_GB_logit,
                                                                                                  zeros_like(
                                                                                                      fake_GB_logit))
            D_ad_cam_loss_GB = self.MSE_loss(real_GB_cam_logit, ones_like(real_GB_cam_logit)) + self.MSE_loss(
                fake_GB_cam_logit, zeros_like(fake_GB_cam_logit))
            D_ad_loss_LB = self.MSE_loss(real_LB_logit, ones_like(real_LB_logit)) + self.MSE_loss(fake_LB_logit,
                                                                                                  zeros_like(
                                                                                                      fake_LB_logit))
            D_ad_cam_loss_LB = self.MSE_loss(real_LB_cam_logit, ones_like(real_LB_cam_logit)) + self.MSE_loss(
                fake_LB_cam_logit, zeros_like(fake_LB_cam_logit))
            D_loss_A = self.adv_weight * (D_ad_loss_GA + D_ad_cam_loss_GA + D_ad_loss_LA + D_ad_cam_loss_LA)
            D_loss_B = self.adv_weight * (D_ad_loss_GB + D_ad_cam_loss_GB + D_ad_loss_LB + D_ad_cam_loss_LB)
            Discriminator_loss = D_loss_A + D_loss_B
            # backward() populates gradients; minimize() applies them; all
            # gradients (including those of the generators touched by the
            # forward pass) are then cleared before the G step.
            Discriminator_loss.backward()
            self.D_optim.minimize(Discriminator_loss)
            self.genB2A.clear_gradients()
            self.genA2B.clear_gradients()
            self.disGA.clear_gradients()
            self.disLA.clear_gradients()
            self.disGB.clear_gradients()
            self.disLB.clear_gradients()
            self.D_optim.clear_gradients()
            # Update G: fresh forward passes (the D step's graph is consumed).
            fake_A2B, fake_A2B_cam_logit, _ = self.genA2B(real_A)
            fake_B2A, fake_B2A_cam_logit, _ = self.genB2A(real_B)
            fake_A2B2A, _, _ = self.genB2A(fake_A2B)
            fake_B2A2B, _, _ = self.genA2B(fake_B2A)
            # Identity mappings: each generator applied to its target domain.
            fake_A2A, fake_A2A_cam_logit, _ = self.genB2A(real_A)
            fake_B2B, fake_B2B_cam_logit, _ = self.genA2B(real_B)
            fake_GA_logit, fake_GA_cam_logit, _ = self.disGA(fake_B2A)
            fake_LA_logit, fake_LA_cam_logit, _ = self.disLA(fake_B2A)
            fake_GB_logit, fake_GB_cam_logit, _ = self.disGB(fake_A2B)
            fake_LB_logit, fake_LB_cam_logit, _ = self.disLB(fake_A2B)
            # Adversarial terms: generators try to push fakes toward 1.
            G_ad_loss_GA = self.MSE_loss(fake_GA_logit, ones_like(fake_GA_logit))
            G_ad_cam_loss_GA = self.MSE_loss(fake_GA_cam_logit, ones_like(fake_GA_cam_logit))
            G_ad_loss_LA = self.MSE_loss(fake_LA_logit, ones_like(fake_LA_logit))
            G_ad_cam_loss_LA = self.MSE_loss(fake_LA_cam_logit, ones_like(fake_LA_cam_logit))
            G_ad_loss_GB = self.MSE_loss(fake_GB_logit, ones_like(fake_GB_logit))
            G_ad_cam_loss_GB = self.MSE_loss(fake_GB_cam_logit, ones_like(fake_GB_cam_logit))
            G_ad_loss_LB = self.MSE_loss(fake_LB_logit, ones_like(fake_LB_logit))
            G_ad_cam_loss_LB = self.MSE_loss(fake_LB_cam_logit, ones_like(fake_LB_cam_logit))
            # Cycle-consistency and identity reconstruction (L1).
            G_recon_loss_A = self.L1_loss(fake_A2B2A, real_A)
            G_recon_loss_B = self.L1_loss(fake_B2A2B, real_B)
            G_identity_loss_A = self.L1_loss(fake_A2A, real_A)
            G_identity_loss_B = self.L1_loss(fake_B2B, real_B)
            # CAM loss: distinguish translated vs identity attention logits.
            G_cam_loss_A = self.BCE_loss(fake_B2A_cam_logit,
                                         ones_like(fake_B2A_cam_logit)) + self.BCE_loss(
                fake_A2A_cam_logit, zeros_like(fake_A2A_cam_logit))
            G_cam_loss_B = self.BCE_loss(fake_A2B_cam_logit,
                                         ones_like(fake_A2B_cam_logit)) + self.BCE_loss(
                fake_B2B_cam_logit, zeros_like(fake_B2B_cam_logit))
            G_loss_A = self.adv_weight * (
                    G_ad_loss_GA + G_ad_cam_loss_GA + G_ad_loss_LA + G_ad_cam_loss_LA) + self.cycle_weight * G_recon_loss_A + self.identity_weight * G_identity_loss_A + self.cam_weight * G_cam_loss_A
            G_loss_B = self.adv_weight * (
                    G_ad_loss_GB + G_ad_cam_loss_GB + G_ad_loss_LB + G_ad_cam_loss_LB) + self.cycle_weight * G_recon_loss_B + self.identity_weight * G_identity_loss_B + self.cam_weight * G_cam_loss_B
            Generator_loss = G_loss_A + G_loss_B
            Generator_loss.backward()
            self.G_optim.minimize(Generator_loss)
            self.genB2A.clear_gradients()
            self.genA2B.clear_gradients()
            self.disGA.clear_gradients()
            self.disLA.clear_gradients()
            self.disGB.clear_gradients()
            self.disLB.clear_gradients()
            self.G_optim.clear_gradients()
            # Re-clip rho after the optimizer step.
            self.Rho_clipper(self.genA2B)
            self.Rho_clipper(self.genB2A)
            print("[%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f" % (step, self.iteration, time.time() - start_time, Discriminator_loss, Generator_loss))
            if step % self.print_freq == 0:
                # Build a 7-row sample strip (input, identity CAM, identity,
                # translation CAM, translation, cycle CAM, cycle) per column.
                train_sample_num = 5
                test_sample_num = 5
                A2B = np.zeros((self.img_size * 7, 0, 3))
                B2A = np.zeros((self.img_size * 7, 0, 3))
                self.genA2B.eval(), self.genB2A.eval(), self.disGA.eval(), self.disGB.eval(), self.disLA.eval(), self.disLB.eval()
                for _ in range(train_sample_num):
                    real_A = next(self.trainA_loader)
                    real_B = next(self.trainB_loader)
                    real_A = np.array([real_A[0].reshape(3, 256, 256)]).astype("float32")
                    real_B = np.array([real_B[0].reshape(3, 256, 256)]).astype("float32")
                    real_A = to_variable(real_A)
                    real_B = to_variable(real_B)
                    fake_A2B, _, fake_A2B_heatmap = self.genA2B(real_A)
                    fake_B2A, _, fake_B2A_heatmap = self.genB2A(real_B)
                    fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(fake_A2B)
                    fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(fake_B2A)
                    fake_A2A, _, fake_A2A_heatmap = self.genB2A(real_A)
                    fake_B2B, _, fake_B2B_heatmap = self.genA2B(real_B)
                    A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))),
                                                               cam(tensor2numpy(fake_A2A_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))),
                                                               cam(tensor2numpy(fake_A2B_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))),
                                                               cam(tensor2numpy(fake_A2B2A_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)), 1)
                    B2A = np.concatenate((B2A, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_B[0]))),
                                                               cam(tensor2numpy(fake_B2B_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_B2B[0]))),
                                                               cam(tensor2numpy(fake_B2A_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_B2A[0]))),
                                                               cam(tensor2numpy(fake_B2A2B_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_B2A2B[0])))), 0)), 1)
                for _ in range(test_sample_num):
                    # NOTE(review): self.testA_loader() is called anew on
                    # every iteration, which looks like it would restart the
                    # reader and always yield the first sample — confirm.
                    real_A = next(self.testA_loader())
                    real_B = next(self.testB_loader())
                    real_A = np.array([real_A[0].reshape(3, 256, 256)]).astype("float32")
                    real_B = np.array([real_B[0].reshape(3, 256, 256)]).astype("float32")
                    real_A = to_variable(real_A)
                    real_B = to_variable(real_B)
                    fake_A2B, _, fake_A2B_heatmap = self.genA2B(real_A)
                    fake_B2A, _, fake_B2A_heatmap = self.genB2A(real_B)
                    fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(fake_A2B)
                    fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(fake_B2A)
                    fake_A2A, _, fake_A2A_heatmap = self.genB2A(real_A)
                    fake_B2B, _, fake_B2B_heatmap = self.genA2B(real_B)
                    A2B = np.concatenate((A2B, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_A[0]))),
                                                               cam(tensor2numpy(fake_A2A_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))),
                                                               cam(tensor2numpy(fake_A2B_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))),
                                                               cam(tensor2numpy(fake_A2B2A_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)), 1)
                    B2A = np.concatenate((B2A, np.concatenate((RGB2BGR(tensor2numpy(denorm(real_B[0]))),
                                                               cam(tensor2numpy(fake_B2B_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_B2B[0]))),
                                                               cam(tensor2numpy(fake_B2A_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_B2A[0]))),
                                                               cam(tensor2numpy(fake_B2A2B_heatmap[0]), self.img_size),
                                                               RGB2BGR(tensor2numpy(denorm(fake_B2A2B[0])))), 0)), 1)
                cv2.imwrite(os.path.join(self.result_dir, self.dataset, 'img', 'A2B_%07d.png' % step), A2B * 255.0)
                cv2.imwrite(os.path.join(self.result_dir, self.dataset, 'img', 'B2A_%07d.png' % step), B2A * 255.0)
                self.genA2B.train(), self.genB2A.train(), self.disGA.train(), self.disGB.train(), self.disLA.train(), self.disLB.train()
            if step % self.save_freq == 0:
                self.save(os.path.join(self.result_dir, self.dataset, 'model'), step)
            if step % 1000 == 0:
                # "latest" rolling checkpoint, independent of save_freq.
                fluid.save_dygraph(self.genA2B.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/genA2B"))
                fluid.save_dygraph(self.genB2A.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/genB2A"))
                fluid.save_dygraph(self.disGA.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/disGA"))
                fluid.save_dygraph(self.disGB.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/disGB"))
                fluid.save_dygraph(self.disLA.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/disLA"))
                fluid.save_dygraph(self.disLB.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/disLB"))
                fluid.save_dygraph(self.D_optim.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/D_optim"))
                fluid.save_dygraph(self.G_optim.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/G_optim"))
                # NOTE(review): the next two calls write *generator* weights
                # to the optimizer paths. save_dygraph writes parameter dicts
                # to .pdparams and optimizer dicts to .pdopt, so this is
                # presumably a workaround to ensure a .pdparams file exists at
                # those paths for load_dygraph — confirm before removing.
                fluid.save_dygraph(self.genA2B.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/D_optim"))
                fluid.save_dygraph(self.genB2A.state_dict(),
                                   os.path.join(self.result_dir, self.dataset + "/latest/new/G_optim"))
    def save(self, result_dir, step):
        """Checkpoint all six networks and both optimizers under
        result_dir/<step>/ using fluid.save_dygraph."""
        fluid.save_dygraph(self.genA2B.state_dict(), os.path.join(result_dir, "{}/genA2B".format(step)))
        fluid.save_dygraph(self.genB2A.state_dict(), os.path.join(result_dir, "{}/genB2A".format(step)))
        fluid.save_dygraph(self.disGA.state_dict(), os.path.join(result_dir, "{}/disGA".format(step)))
        fluid.save_dygraph(self.disGB.state_dict(), os.path.join(result_dir, "{}/disGB".format(step)))
        fluid.save_dygraph(self.disLA.state_dict(), os.path.join(result_dir, "{}/disLA".format(step)))
        fluid.save_dygraph(self.disLB.state_dict(), os.path.join(result_dir, "{}/disLB".format(step)))
        # NOTE(review): the next two calls write *generator* weights to the
        # optimizer paths. save_dygraph writes parameter dicts to .pdparams
        # and optimizer dicts to .pdopt, so they do not collide with the two
        # calls below — presumably a workaround so load_dygraph finds a
        # .pdparams file at "D_optim"/"G_optim". Confirm before removing.
        fluid.save_dygraph(self.genA2B.state_dict(), os.path.join(result_dir, "{}/D_optim".format(step)))
        fluid.save_dygraph(self.genB2A.state_dict(), os.path.join(result_dir, "{}/G_optim".format(step)))
        fluid.save_dygraph(self.D_optim.state_dict(), os.path.join(result_dir, "{}/D_optim".format(step)))
        fluid.save_dygraph(self.G_optim.state_dict(), os.path.join(result_dir, "{}/G_optim".format(step)))
def load(self, dir, step):
genA2B, _ = fluid.load_dygraph(os.path.join(dir, "{}/genA2B".format(step)))
genB2A, _ = fluid.load_dygraph(os.path.join(dir, "{}/genB2A".format(step)))
disGA, _ = fluid.load_dygraph(os.path.join(dir, "{}/disGA".format(step)))
disGB, _ = fluid.load_dygraph(os.path.join(dir, "{}/disGB".format(step)))
disLA, _ = fluid.load_dygraph(os.path.join(dir, "{}/disLA".format(step)))
disLB, _ = fluid.load_dygraph(os.path.join(dir, "{}/disLB".format(step)))
_, D_optim = fluid.load_dygraph(os.path.join(dir, "{}/D_optim".format(step)))
_, G_optim = fluid.load_dygraph(os.path.join(dir, "{}/G_optim".format(step)))
self.genA2B.load_dict(genA2B)
self.genB2A.load_dict(genB2A)
self.disGA.load_dict(disGA)
self.disGB.load_dict(disGB)
self.disLA.load_dict(disLA)
self.disLB.load_dict(disLB)
self.G_optim.set_dict(G_optim)
self.D_optim.set_dict(D_optim)
    def test(self):
        """Run inference over both test sets with the newest checkpoint and
        write per-image result strips to result_dir/<dataset>/test/."""
        model_list = os.listdir(os.path.join(self.result_dir, self.dataset, 'model'))
        if not len(model_list) == 0:
            model_list.sort()
            # Checkpoint directories are named by step number; take newest.
            iter = int(model_list[-1])
            self.load(os.path.join(self.result_dir, self.dataset, 'model'), iter)
            print("[*] Load SUCCESS")
        else:
            print("[*] Load FAILURE")
            return
        self.genA2B.eval(), self.genB2A.eval()
        for n, (real_A, _) in enumerate(self.testA_loader()):
            # Hard-coded 3x256x256 shape (assumes img_size == 256 — TODO confirm).
            real_A = np.array([real_A.reshape(3, 256, 256)]).astype("float32")
            real_A = to_variable(real_A)
            fake_A2B, _, fake_A2B_heatmap = self.genA2B(real_A)
            fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(fake_A2B)
            fake_A2A, _, fake_A2A_heatmap = self.genB2A(real_A)
            # Stack input / identity / translation / cycle rows vertically.
            A2B = np.concatenate(
                (RGB2BGR(tensor2numpy(denorm(real_A[0]))), cam(tensor2numpy(fake_A2A_heatmap[0]), self.img_size),
                 RGB2BGR(tensor2numpy(denorm(fake_A2A[0]))), cam(tensor2numpy(fake_A2B_heatmap[0]), self.img_size),
                 RGB2BGR(tensor2numpy(denorm(fake_A2B[0]))),
                 cam(tensor2numpy(fake_A2B2A_heatmap[0]), self.img_size),
                 RGB2BGR(tensor2numpy(denorm(fake_A2B2A[0])))), 0)
            cv2.imwrite(os.path.join(self.result_dir, self.dataset, 'test', 'A2B_%d.png' % (n + 1)), A2B * 255.0)
        for n, (real_B, _) in enumerate(self.testB_loader()):
            real_B = np.array([real_B.reshape(3, 256, 256)]).astype("float32")
            real_B = to_variable(real_B)
            fake_B2A, _, fake_B2A_heatmap = self.genB2A(real_B)
            fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(fake_B2A)
            fake_B2B, _, fake_B2B_heatmap = self.genA2B(real_B)
            B2A = np.concatenate(
                (RGB2BGR(tensor2numpy(denorm(real_B[0]))), cam(tensor2numpy(fake_B2B_heatmap[0]), self.img_size),
                 RGB2BGR(tensor2numpy(denorm(fake_B2B[0]))), cam(tensor2numpy(fake_B2A_heatmap[0]), self.img_size),
                 RGB2BGR(tensor2numpy(denorm(fake_B2A[0]))),
                 cam(tensor2numpy(fake_B2A2B_heatmap[0]), self.img_size),
                 RGB2BGR(tensor2numpy(denorm(fake_B2A2B[0])))), 0)
            cv2.imwrite(os.path.join(self.result_dir, self.dataset, 'test', 'B2A_%d.png' % (n + 1)), B2A * 255.0)
def test_change(self):
model_list = os.listdir(os.path.join(self.result_dir, self.dataset, 'model'))
if not len(model_list) == 0:
model_list.sort()
iter = int(model_list[-1].split('/')[-1])
self.load(os.path.join(self.result_dir, self.dataset, 'model'), iter)
print("[*] Load SUCCESS")
else:
print("[*] Load FAILURE")
return
self.genA2B.eval(), self.genB2A.eval()
for n, (real_A, fname) in enumerate(self.testA_loader()):
real_A = np.array([real_A[0].reshape(3, 256, 256)]).astype("float32")
real_A = to_variable(real_A)
fake_A2B, _, _ = self.genA2B(real_A)
A2B = RGB2BGR(tensor2numpy(denorm(fake_A2B[0])))
| |
commands . getoutput ( "ifconfig | egrep 'vxlan|vlan4094'" )
if ( ooIi1iiI11I . find ( "vxlan" ) == - 1 ) :
lprint ( "No VXLAN interface found, cannot program hardware" )
return
if 24 - 24: o0oOOo0O0Ooo * I11i . I1IiiI
if ( ooIi1iiI11I . find ( "vlan4094" ) == - 1 ) :
lprint ( "No vlan4094 interface found, cannot program hardware" )
return
if 52 - 52: OoooooooOO * I1Ii111 % II111iiii
I1i1iiI11ii = commands . getoutput ( "ip addr | egrep vlan4094 | egrep inet" )
if ( I1i1iiI11ii == "" ) :
lprint ( "No IP address found on vlan4094, cannot program hardware" )
return
if 51 - 51: OoO0O00 - Oo0Ooo . I11i / oO0o . II111iiii * I1Ii111
I1i1iiI11ii = I1i1iiI11ii . split ( "inet " ) [ 1 ]
I1i1iiI11ii = I1i1iiI11ii . split ( "/" ) [ 0 ]
if 40 - 40: I1Ii111
if 88 - 88: i11iIiiIii * O0 . i11iIiiIii . o0oOOo0O0Ooo . OoooooooOO
if 94 - 94: ooOoO0o / oO0o . iII111i % IiII - I11i
if 61 - 61: OoooooooOO % OoO0O00 . OoO0O00 - I11i
if 35 - 35: oO0o . Ii1I
if 71 - 71: iIii1I11I1II1 / I1ii11iIi11i + OoooooooOO . ooOoO0o
if 63 - 63: i11iIiiIii % I1Ii111 % IiII * i1IIi + I1Ii111 + I1Ii111
O0o0 = [ ]
IiI1i = commands . getoutput ( "arp -i vlan4094" ) . split ( "\n" )
for i11ii in IiI1i :
if ( i11ii . find ( "vlan4094" ) == - 1 ) : continue
if ( i11ii . find ( "(incomplete)" ) == - 1 ) : continue
I1i1i1iIIiI11 = i11ii . split ( " " ) [ 0 ]
O0o0 . append ( I1i1i1iIIiI11 )
if 29 - 29: I1IiiI - OOooOOo
if 83 - 83: OoOoOO00 * oO0o . OOooOOo - OoO0O00
I1i1i1iIIiI11 = None
O00o0Oo = I1i1iiI11ii
I1i1iiI11ii = I1i1iiI11ii . split ( "." )
for oO in range ( 1 , 255 ) :
I1i1iiI11ii [ 3 ] = str ( oO )
I1Iii1I = "." . join ( I1i1iiI11ii )
if ( I1Iii1I in O0o0 ) : continue
if ( I1Iii1I == O00o0Oo ) : continue
I1i1i1iIIiI11 = I1Iii1I
break
if 73 - 73: I1ii11iIi11i / iII111i / Oo0Ooo
if ( I1i1i1iIIiI11 == None ) :
lprint ( "Address allocation failed for vlan4094, cannot program " + "hardware" )
if 85 - 85: Ii1I
return
if 67 - 67: i11iIiiIii / II111iiii . i11iIiiIii * i11iIiiIii / ooOoO0o . oO0o
if 46 - 46: oO0o . OoO0O00 - iIii1I11I1II1 . IiII
if 52 - 52: i11iIiiIii / O0 + oO0o . I11i
if 73 - 73: OoooooooOO / I1IiiI % Oo0Ooo . oO0o + OoooooooOO
if 84 - 84: I1ii11iIi11i - OOooOOo * II111iiii
if 28 - 28: I1ii11iIi11i . oO0o / o0oOOo0O0Ooo - iII111i
if 65 - 65: I1ii11iIi11i * OOooOOo * ooOoO0o + oO0o - OOooOOo
ooOOo0Oo000 = i1IIIIi1Ii111 . split ( "." )
i1iiiIIiI = lisp_hex_string ( ooOOo0Oo000 [ 1 ] ) . zfill ( 2 )
i1iIiII1 = lisp_hex_string ( ooOOo0Oo000 [ 2 ] ) . zfill ( 2 )
OO000oOooO00 = lisp_hex_string ( ooOOo0Oo000 [ 3 ] ) . zfill ( 2 )
o0O0oO0 = "00:00:00:{}:{}:{}" . format ( i1iiiIIiI , i1iIiII1 , OO000oOooO00 )
IIi1iii = "0000.00{}.{}{}" . format ( i1iiiIIiI , i1iIiII1 , OO000oOooO00 )
oo0O0oOOo0O = "arp -i vlan4094 -s {} {}" . format ( I1i1i1iIIiI11 , o0O0oO0 )
os . system ( oo0O0oOOo0O )
if 16 - 16: II111iiii % oO0o
if 59 - 59: iII111i
if 26 - 26: I11i + o0oOOo0O0Ooo / OoO0O00
if 55 - 55: i11iIiiIii
iiIIi1Iii1 = ( "mac address-table static {} vlan 4094 " + "interface vxlan 1 vtep {}" ) . format ( IIi1iii , i1IIIIi1Ii111 )
if 25 - 25: OoOoOO00 * OoOoOO00 * Oo0Ooo / iIii1I11I1II1
lisp_send_to_arista ( iiIIi1Iii1 , None )
if 63 - 63: IiII - ooOoO0o % OoO0O00 * i11iIiiIii % OOooOOo
if 90 - 90: oO0o / Oo0Ooo + iII111i - O0
if 76 - 76: ooOoO0o + IiII / I1ii11iIi11i . iIii1I11I1II1
if 52 - 52: iIii1I11I1II1 * OOooOOo % i1IIi
if 1 - 1: o0oOOo0O0Ooo + Ii1I - o0oOOo0O0Ooo % I1ii11iIi11i
o0oOIIiIi11I = "ip route add {} via {}" . format ( iIiII11O00 , I1i1i1iIIiI11 )
os . system ( o0oOIIiIi11I )
if 22 - 22: ooOoO0o . ooOoO0o % i1IIi * II111iiii * IiII
lprint ( "Hardware programmed with commands:" )
o0oOIIiIi11I = o0oOIIiIi11I . replace ( iIiII11O00 , green ( iIiII11O00 , False ) )
lprint ( " " + o0oOIIiIi11I )
lprint ( " " + oo0O0oOOo0O )
iiIIi1Iii1 = iiIIi1Iii1 . replace ( i1IIIIi1Ii111 , red ( i1IIIIi1Ii111 , False ) )
lprint ( " " + iiIIi1Iii1 )
return
if 6 - 6: II111iiii . iII111i % I1ii11iIi11i + IiII / I11i
if 35 - 35: iII111i * Oo0Ooo
if 61 - 61: I1Ii111 - I1IiiI - I11i * OoO0O00 - O0 + iII111i
if 9 - 9: IiII - OOooOOo / O0 + i1IIi . O0 % oO0o
if 57 - 57: i1IIi . OOooOOo
if 72 - 72: ooOoO0o / I1IiiI - ooOoO0o * OoO0O00 . OOooOOo
if 1 - 1: o0oOOo0O0Ooo + I1Ii111 + OoO0O00 * OOooOOo / I1Ii111 % i11iIiiIii
def lisp_clear_hardware_walk(mc, parms):
    """Remove the kernel route previously installed for one map-cache entry.

    mc    -- map-cache entry whose EID prefix (without instance-id) names
             the route to delete.
    parms -- unused walk-callback parameter.
    Returns [True, None] so the cache walk continues.
    """
    prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(prefix))
    return ([True, None])
if 49 - 49: OOooOOo - oO0o
if 73 - 73: o0oOOo0O0Ooo . I1IiiI - I11i . ooOoO0o % II111iiii . OoooooooOO
if 8 - 8: OoooooooOO
if 92 - 92: ooOoO0o + IiII * II111iiii
if 41 - 41: I1IiiI + OoOoOO00 . OOooOOo
if 57 - 57: II111iiii . iIii1I11I1II1
if 32 - 32: o0oOOo0O0Ooo
if 75 - 75: I1IiiI . II111iiii - iII111i % IiII * OoO0O00 % ooOoO0o
def lisp_clear_map_cache ( ) :
global lisp_map_cache , lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap , lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
if 38 - 38: I1IiiI / OoooooooOO
IiiI11iI1I = bold ( "User cleared" , False )
o0OO0oooo = lisp_map_cache . cache_count
lprint ( "{} map-cache with {} entries" . format ( IiiI11iI1I , o0OO0oooo ) )
if 43 - 43: OoO0O00 - I1Ii111 % OoooooooOO % I1ii11iIi11i . OoOoOO00
if ( lisp_program_hardware ) :
lisp_map_cache . walk_cache ( lisp_clear_hardware_walk , None )
if 87 - 87: OOooOOo
lisp_map_cache = lisp_cache ( )
if 60 - 60: ooOoO0o * o0oOOo0O0Ooo . OoO0O00 * iII111i * oO0o * i1IIi
if 100 - 100: iII111i . o0oOOo0O0Ooo - I1Ii111 % oO0o
if 11 - 11: o0oOOo0O0Ooo . OoooooooOO - i1IIi
if 71 - 71: I1IiiI . OOooOOo . I1ii11iIi11i
if 90 - 90: i11iIiiIii + I1Ii111 % II111iiii
lisp_rloc_probe_list = { }
if 67 - 67: OoOoOO00 / iII111i * OoO0O00 % i11iIiiIii
if 76 - 76: OoO0O00
if 92 - 92: iIii1I11I1II1 * O0 % I11i
if 92 - 92: OoOoOO00 + oO0o
lisp_crypto_keys_by_rloc_encap = { }
lisp_crypto_keys_by_rloc_decap = { }
if 89 - 89: IiII % iII111i / iIii1I11I1II1 . Ii1I . Oo0Ooo + ooOoO0o
if 28 - 28: I1IiiI . iIii1I11I1II1
if 12 - 12: I1Ii111 * OOooOOo
if 11 - 11: II111iiii % O0 % O0 % o0oOOo0O0Ooo
if 45 - 45: OoooooooOO * oO0o
lisp_rtr_list = { }
if 74 - 74: ooOoO0o * I11i / oO0o - IiII + OoOoOO00
if 16 - 16: Oo0Ooo
if 29 - 29: Oo0Ooo . I1ii11iIi11i / II111iiii / oO0o / o0oOOo0O0Ooo + I11i
if 4 - 4: OoooooooOO % I1ii11iIi11i . OoO0O00 * o0oOOo0O0Ooo + I1ii11iIi11i * IiII
lisp_process_data_plane_restart ( True | |
<reponame>leschzinerlab/myami-3.2-freeHand
#!/usr/bin/env python
#
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from leginon import leginondata
import numpy
import scipy.ndimage
from pyami import arraystats, imagefun
import time
import cameraclient
import itertools
import leginon.session
import leginon.leginonconfig
import os
import sys
import numextension
ref_cache = {}
idcounter = itertools.cycle(range(100))
class CorrectorClient(cameraclient.CameraClient):
    def __init__(self):
        # Old-style explicit base-class init (this is Python 2 code).
        cameraclient.CameraClient.__init__(self)
        # Bad-pixel count cap; 800 appears to be an empirical threshold —
        # TODO confirm where it is enforced (not used in this visible chunk).
        self.max_badpixels = 800
def acquireCorrectedCameraImageData(self, channel=0, **kwargs):
imagedata = self.acquireCameraImageData(**kwargs)
self.correctCameraImageData(imagedata, channel)
return imagedata
def researchCorrectorImageData(self, type, scopedata, cameradata, channel):
if type == 'dark':
imagetemp = leginondata.DarkImageData()
elif type == 'bright':
imagetemp = leginondata.BrightImageData()
elif type == 'norm':
imagetemp = leginondata.NormImageData()
else:
return None
## query only based on certain camera parameters, not all
imagetemp['camera'] = leginondata.CameraEMData()
for key in ('ccdcamera','dimension','binning','offset','gain index'):
imagetemp['camera'][key] = cameradata[key]
# query only based on certain scope parameters, not all
imagetemp['scope'] = leginondata.ScopeEMData()
for key in ('tem', 'high tension'):
imagetemp['scope'][key] = scopedata[key]
imagetemp['channel'] = channel
try:
ref = imagetemp.query(results=1)
except Exception, e:
self.logger.warning('Reference image query failed: %s' % e)
ref = None
if ref:
ref = ref[0]
else:
return None
if ref['image'] is None:
return None
shape = ref['image'].shape
dim = ref['camera']['dimension']
if dim['x'] != shape[1] or dim['y'] != shape[0]:
self.logger.error('%s: bad shape: %s' % (ref['filename'], shape,))
return None
return ref
def getBrightImageFromNorm(self,normdata):
'''
Get bright image used to produce the norm image
This is made to be back compatible to early leginondata that
has no bright image association but would be the closest in time before
the norm was calculated
'''
if normdata is None:
return None
# newer leginon data will have bright image associated with norm image
if 'bright' in normdata.keys() and normdata['bright'] is not None:
return normdata['bright']
# bright image may have the same CameraEMData
q = leginondata.BrightImageData(camera=normdata['camera'])
brightresults = q.query(results=1)
if brightresults:
return brightresults[0]
# otherwise need to look up timestamp
timestamp = normdata.timestamp
normcam = normdata['camera']
qcam = leginondata.CameraEMData(dimension=normcam['dimension'],
offset=normcam['offset'], binning=normcam['binning'],
ccdcamera=normcam['ccdcamera'])
qcam['exposure type'] = 'normal'
qcam['energy filtered'] = normcam['energy filtered']
qcam['gain index'] = normcam['gain index']
normscope = normdata['scope']
qscope = leginondata.ScopeEMData(tem=normscope['tem'])
qscope['high tension'] = normscope['high tension']
q = leginondata.BrightImageData(camera=qcam,scope=qscope,channel=normdata['channel'])
brightlist = q.query()
for brightdata in brightlist:
if brightdata.timestamp < timestamp:
break
return brightdata
def createRefQuery(self,reftype,qcam,qscope,channel):
if reftype == 'norm':
q = leginondata.NormImageData(camera=qcam,scope=qscope,channel=channel)
elif reftype == 'bright':
q = leginondata.BrightImageData(camera=qcam,scope=qscope,channel=channel)
elif reftype == 'dark':
q = leginondata.DarkImageData(camera=qcam,scope=qscope,channel=channel)
return q
def getAlternativeChannelReference(self,reftype,refdata):
if refdata is None:
return None
altnormdata = self.getAlternativeChannelNorm(refdata)
if reftype == 'norm':
return altnormdata
elif reftype == 'bright':
return altnormdata['bright']
elif reftype == 'dark':
return altnormdata['dark']
return q
    def getAlternativeChannelNorm(self,refdata):
        '''
        Get norm image data of the other channel closest in time.
        Falls back to refdata itself when the other channel has no norm.
        '''
        if refdata is None:
            return None
        reftype = 'norm'
        timestamp = refdata.timestamp
        refcam = refdata['camera']
        # Match on geometry plus exposure/filter/gain settings.
        qcam = leginondata.CameraEMData(dimension=refcam['dimension'],
                offset=refcam['offset'], binning=refcam['binning'],
                ccdcamera=refcam['ccdcamera'])
        qcam['exposure time'] = refcam['exposure time']
        qcam['energy filtered'] = refcam['energy filtered']
        qcam['gain index'] = refcam['gain index']
        refscope = refdata['scope']
        qscope = leginondata.ScopeEMData(tem=refscope['tem'])
        qscope['high tension'] = refscope['high tension']
        # Channels are 0/1; flip to query the other one.
        altchannel = int(refdata['channel'] == 0)
        q = self.createRefQuery(reftype,qcam,qscope,altchannel)
        reflist = q.query()
        if len(reflist) == 0:
            # Not to query exposure time if none found
            qcam['exposure time'] = None
            q = self.createRefQuery(reftype,qcam,qscope,altchannel)
            reflist = q.query()
        if len(reflist) == 0:
            #no switching, no alternative channel found
            return refdata
        # Scan forward for the first entry older than refdata (reflist is
        # presumably newest-first — confirm query ordering); then scan the
        # reversed list for the first entry newer than refdata.
        for newrefdata in reflist:
            if newrefdata.timestamp < timestamp:
                break
        before_ref = newrefdata
        reflist.reverse()
        for newrefdata in reflist:
            if newrefdata.timestamp > timestamp:
                break
        after_ref = newrefdata
        # Pick whichever neighbor is closer in time to refdata.
        if after_ref.timestamp - timestamp > timestamp - before_ref.timestamp:
            return before_ref
        else:
            return after_ref
def formatCorrectorKey(self, key):
    """Return a human-readable description of a corrector cache key tuple.

    Falls back to str(key) when the tuple is too short to format.
    """
    friendly = {
        'dark': 'dark reference image',
        'bright': 'bright reference image',
        'norm': 'normalization image',
    }
    try:
        exptype = friendly.get(key[6], key[6])
    except IndexError:
        exptype = 'unknown image'
    template = '%s, %sV, size %sx%s, bin %sx%s, offset (%s,%s), channel %s, gain %s'
    fields = (exptype, key[8:9], key[0:1])  # placeholder removed below
    try:
        return template % (exptype, key[8], key[0], key[1], key[2],
                           key[3], key[4], key[5], key[9], key[10])
    except IndexError:
        return str(key)
def makeCorrectorKey(self, type, scopedata, cameradata, channel):
    """Build the hashable tuple used to index the reference-image cache.

    Layout: (dim_x, dim_y, bin_x, bin_y, off_x, off_y, type, camera name,
    high tension, channel, gain index); missing x/y pairs become None.
    """
    key = []
    for param in ('dimension', 'binning', 'offset'):
        values = cameradata[param]
        if values is None:
            key += [None, None]
        else:
            key += [values['x'], values['y']]
    key += [
        type,
        cameradata['ccdcamera']['name'],
        scopedata['high tension'],
        channel,
        cameradata['gain index'],
    ]
    return tuple(key)
def getCorrectorImageFromCache(self, type, scopedata, cameradata, channel):
    """Look up a corrector reference in the module-level cache.

    Raises KeyError when the reference is not cached.
    """
    cache_key = self.makeCorrectorKey(type, scopedata, cameradata, channel)
    return ref_cache[cache_key]
def correctorImageExists(self, type, scopedata, cameradata, channel):
    """Return True when a usable corrector reference image is on record.

    An in-memory array counts as existing; otherwise the database file
    reference is checked without dereferencing the image payload.
    """
    refdata = self.researchCorrectorImageData(type, scopedata, cameradata, channel)
    if not refdata:
        return False
    fileref = refdata.special_getitem('image', dereference=False)
    if isinstance(fileref, numpy.ndarray):
        return True
    return fileref.exists()
def retrieveCorrectorImageData(self, type, scopedata, cameradata, channel):
    """Return corrector reference image data, preferring the in-memory cache.

    On a cache miss the database is consulted and a hit is cached for next
    time; returns None when no reference is on record.
    """
    key = self.makeCorrectorKey(type, scopedata, cameradata, channel)
    try:
        return self.getCorrectorImageFromCache(type, scopedata, cameradata, channel)
    except KeyError:
        self.logger.info('Loading %s...' % self.formatCorrectorKey(key))
    ref = self.researchCorrectorImageData(type, scopedata, cameradata, channel)
    if not ref:
        return None
    ref_cache[key] = ref
    return ref
def prepareDark(self, dark, raw):
    """Rescale the dark reference to match the raw image's frame count.

    For cameras that return a sum of n frames, scales the dark image so it
    represents the same exposure as the raw image, assuming a constant
    per-frame exposure time.  Returns the dark array unchanged whenever
    frame information is missing or unusable.
    """
    darkarray = dark['image']
    try:
        dark_exptime = len(dark['use frames']) * float(dark['camera']['frame time'])
        raw_exptime = len(raw['use frames']) * float(raw['camera']['frame time'])
    except (KeyError, TypeError, ValueError):
        # Missing keys, None values or unparsable frame times: fall back to
        # the unscaled dark (the original used bare `except:` clauses here,
        # which also swallowed unrelated errors).
        return darkarray
    if dark_exptime == 0.0:
        return darkarray
    multiplier = raw_exptime / dark_exptime
    if multiplier != 1.0:
        darkarray = multiplier * darkarray
    return darkarray
def calculateDarkScale(self, rawarray, darkarray):
    """Return the dark-subtraction scale via a Gram-Schmidt projection.

    Useful for very low signal-to-noise data such as DD raw frames.  The
    same factor must also be applied when building the norm array (see
    calculateNorm).  Returns 1.0 when the dark array has zero variance.

    Implementation note: uses numpy's population std (ddof=0) instead of
    the project's numextension.allstats -- numerically equivalent for
    this use; numpy was already a dependency here (corrcoef).
    """
    flat_raw = rawarray.ravel()
    flat_dark = darkarray.ravel()
    raw_std = flat_raw.std()
    dark_std = flat_dark.std()
    if dark_std == 0:
        return 1.0
    corr = numpy.corrcoef(flat_raw, flat_dark)[0, 1]
    return corr * raw_std / dark_std
def calculateNorm(self, brightarray, darkarray, scale=None):
    """Calculate a norm array from bright and dark arrays.

    A scale for the dark array can be specified; for most cases a scale of
    1 is good enough if the bright and dark exposure times are equal.  If
    a Gram-Schmidt dark_scale was used on the data (calculateDarkScale),
    pass the same value here so the norm is scaled consistently.
    Returns a float32 array; ratios above 20 (dead pixels) are clamped
    to 1 to avoid over-correction.
    """
    dark_scale = scale if scale else 1.0
    normarray = numpy.asarray(brightarray - dark_scale * darkarray, numpy.float32)
    # Division may produce infinity or zero division, so keep the values
    # strictly positive.  Upper bound dropped: the original clipped at
    # sys.maxint, which is Python-2-only and effectively unbounded anyway.
    normarray = numpy.clip(normarray, 0.001, None)
    # numpy mean replaces numextension.allstats -- same population mean.
    normavg = normarray.mean()
    normarray = normavg / normarray
    # Avoid over-correcting dead pixels.
    normarray = numpy.ma.masked_greater(normarray, 20).filled(1)
    return normarray
def normalizeImageArray(self, rawarray, darkarray, normarray, is_counting=False):
    """Apply dark subtraction and gain normalization to a raw array.

    is_counting is accepted for interface compatibility but unused here.
    NaN/inf values produced by bad reference pixels are replaced with 0.
    """
    corrected = (rawarray - darkarray) * normarray
    return numpy.where(numpy.isfinite(corrected), corrected, 0)
def normalizeCameraImageData(self, imagedata, channel):
    """Normalize an acquired image in place using stored dark/norm references.

    Records the references and the correction channel on imagedata; logs a
    warning and leaves the image untouched when references are missing.
    """
    cameradata = imagedata['camera']
    scopedata = imagedata['scope']
    dark = self.retrieveCorrectorImageData('dark', scopedata, cameradata, channel)
    norm = self.retrieveCorrectorImageData('norm', scopedata, cameradata, channel)
    if dark is None or norm is None:
        self.logger.warning('Cannot find references, image will not be normalized')
        return
    darkarray = self.prepareDark(dark, imagedata)
    is_k2 = 'GatanK2' in cameradata['ccdcamera']['name']
    imagedata['image'] = self.normalizeImageArray(imagedata['image'],
                                                  darkarray, norm['image'], is_k2)
    imagedata['dark'] = dark
    imagedata['bright'] = norm['bright']
    imagedata['norm'] = norm
    imagedata['correction channel'] = channel
def denormalizeCameraImageData(self, imagedata):
    """Reverse the normalization in place to recover a raw image.

    Raises RuntimeError when the image carries no dark/norm references.
    """
    dark = imagedata['dark']
    norm = imagedata['norm']
    if dark is None or norm is None:
        raise RuntimeError('uncorrected cannot be denormalized')
    raw = imagedata['image'] / norm['image']
    raw = numpy.where(numpy.isfinite(raw), raw, 0) + dark['image']
    imagedata['image'] = raw
    for field in ('dark', 'bright', 'norm', 'correction channel'):
        imagedata[field] = None
def reverseCorrectorChannel(self, imagedata):
    """Return a copy of imagedata re-corrected with the opposite channel.

    Raises RuntimeError when the recorded correction channel is not 0/1.
    """
    oldchannel = imagedata['correction channel']
    if oldchannel not in (0, 1):
        raise RuntimeError('cannot reverse unknown channel')
    newchannel = 1 - oldchannel
    newimagedata = imagedata.copy()
    self.denormalizeCameraImageData(newimagedata)
    self.normalizeCameraImageData(newimagedata, newchannel)
    return newimagedata
def correctCameraImageData(self, imagedata, channel):
    """Run an acquired image through the correction pipeline, in place.

    Steps: (1) dark/norm normalization unless the camera already applied a
    system correction, (2) bad-pixel repair from the corrector plan,
    (3) float32 conversion and clipping to the camera's pixelmax,
    (4) optional despiking.  Returns None; no-op when there is no image.
    """
    if imagedata['image'] is None:
        return
    camera = imagedata['camera']
    if 'system corrected' not in camera.keys() or not camera['system corrected']:
        try:
            self.normalizeCameraImageData(imagedata, channel)
            imagedata['correction channel'] = channel
        # Python-3-compatible handler syntax (was the Python-2-only
        # `except Exception, e`).  The broad catch is deliberate:
        # correction is best-effort and failures are logged.
        except Exception as e:
            self.logger.error('Normalize failed: %s' % e)
            self.logger.warning('Image will not be normalized')
    cameradata = imagedata['camera']
    plan, plandata = self.retrieveCorrectorPlan(cameradata)
    # Save corrector plan for easy post-processing of raw frames.
    imagedata['corrector plan'] = plandata
    if plan is not None:
        self.fixBadPixels(imagedata['image'], plan)
    pixelmax = imagedata['camera']['ccdcamera']['pixelmax']
    imagedata['image'] = numpy.asarray(imagedata['image'], numpy.float32)
    if pixelmax is not None:
        imagedata['image'] = numpy.clip(imagedata['image'], 0, pixelmax)
    if plan is not None and plan['despike']:
        self.logger.debug('Despiking...')
        nsize = plan['despike size']
        thresh = plan['despike threshold']
        imagefun.despike(imagedata['image'], nsize, thresh)
        self.logger.debug('Despiked')
def reseachCorrectorPlan(self, cameradata):
    """Query the most recent corrector plan matching this camera geometry.

    Returns the newest CorrectorPlanData or None.  (The 'reseach' typo in
    the name is kept: callers use it.)
    """
    qcamera = leginondata.CameraEMData()
    # Gain index is ignored for now: camera settings lack it when the plan
    # is saved.
    for field in ('ccdcamera', 'dimension', 'binning', 'offset'):
        qcamera[field] = cameradata[field]
    qplan = leginondata.CorrectorPlanData()
    qplan['camera'] = qcamera
    results = qplan.query()
    if results:
        return results[0]
    return None
def retrieveCorrectorPlan(self, cameradata):
    """Return (formatted plan dict, raw plan data) for this camera."""
    plandata = self.reseachCorrectorPlan(cameradata)
    plan = self.formatCorrectorPlan(plandata)
    return plan, plandata
def formatCorrectorPlan(self, plandata=None):
    """Convert corrector plan data into the plain dict used by the fixers.

    Returns conservative defaults (no bad pixels, despike off) when no
    plan is on record.
    """
    if not plandata:
        return {'rows': [], 'columns': [], 'pixels': [],
                'despike': False, 'despike size': 11, 'despike threshold': 3.5}
    badpixels = plandata['bad_pixels']
    return {
        'rows': list(plandata['bad_rows']),
        'columns': list(plandata['bad_cols']),
        'despike': plandata['despike'],
        'despike size': plandata['despike size'],
        'despike threshold': plandata['despike threshold'],
        'pixels': [] if badpixels is None else list(badpixels),
    }
def fixBadPixels(self, image, plan):
badrows = plan['rows']
badcols = plan['columns']
badrowscols = [badrows,badcols]
badpixels = plan['pixels']
shape = image.shape
## fix individual pixels (pixels are in x,y format)
## replace each with median of 8 neighbors, however, some neighbors
## are also labeled as bad, so we will not use those in the calculation
if len(badpixels) >= self.max_badpixels:
self.logger.error('Too many (%d) bad pixels will slow down image acquisition' % len(badpixels))
self.logger.warning('Clear bad pixel plan in Corrector to speed up')
for badpixel in badpixels:
badcol,badrow = badpixel
if badcol in badcols or badrow in badrows:
## pixel will be fixed along with entire row/column later
continue
neighbors = []
## d is how far we will go to find good pixels for this calculation
## this is extra paranoia for the case where there is a whole cluster of
## bad pixels. Usually it will only interate once (d=1)
for d in range(1,20):
for r in | |
green]'
fb10_b = '[bold red]Incorrect[/bold red]'
fb10 = [fb10_a, fb10_b]
all_options.append(options10)
all_answers.append(ans10)
all_feedback.append(fb10)
# Question 11: installing a Python package.
options11 = ['Conda install', 'PIP install', 'APT-get install', 'Easy-install']
ans11 = 'PIP install'
fb11_a, fb11_b, fb11_c, fb11_d = (
    '[bold red]Try again[/bold red]. install conda sets up the environment, not a specific package',
    '[bold green]Correct![/bold green] Pip can be used to install Python packages outside of a virtual environment like conda. ',
    '[bold red]Incorrect[/bold red], this command is used in Linux terminals, and is not used for PYNQ installs. ',
    '[bold red]Incorrect[/bold red]',
)
fb11 = [fb11_a, fb11_b, fb11_c, fb11_d]
all_options.append(options11)
all_answers.append(ans11)
all_feedback.append(fb11)
# Question 12: matching device descriptions to terms.
options12 = ['ASIC, SoC, FPGA, MPSoC', 'FPGA, SoC, ASIC, MPSoC', 'SoC, ASIC, FPGA, MPSoC', 'ASIC, MPSoC, FPGA, SoC']
ans12 = 'ASIC, SoC, FPGA, MPSoC'
fb12_a = ('[bold green]Correct![/bold green] We know ASICs are application specific. The other three descriptions can '
          'apply to an FPGA, but you can determine the correct answer from the mention of other components and '
          'multiple processors.')
# All three wrong answers share the same feedback string.
fb12_b = fb12_c = fb12_d = '[bold red]Incorrect![/bold red] Go back to the beginning of this notebook to review info on these four terms.'
fb12 = [fb12_a, fb12_b, fb12_c, fb12_d]
all_options.append(options12)
all_answers.append(ans12)
all_feedback.append(fb12)
# Question 13: which language is NOT a hardware description language.
options13 = ['Verilog', 'JHDL', 'Ruby', 'VHDL']
ans13 = 'Ruby'
# The original concatenated many empty '' fragments; merged into one literal.
fb13_a = ('[bold red]Incorrect![/bold red] - look at the examples given in the notebook. Don\'t be afraid to look up a '
          'language that looks unfamiliar to you.')
fb13_c = ('[bold green]Correct![/bold green] Ruby is a high-level programming language that isn\'t used in designing '
          'hardware.')
# Same "incorrect" feedback is reused for every wrong option.
fb13 = [fb13_a, fb13_a, fb13_c, fb13_a]
all_options.append(options13)
all_answers.append(ans13)
all_feedback.append(fb13)
# Question 14: what a process-node "feature size" refers to.
options14 = ['The size of the FPGA', 'The size of a feature on an FPGA', 'The maximum routing distance between IP',
             'The physical size of a processor on an SoC']
ans14 = 'The size of a feature on an FPGA'
fb14_a, fb14_b, fb14_c, fb14_d = (
    '[bold red]Incorrect![/bold red] remember that an FPGA is a silicon component.',
    '[bold green]Correct![/bold green] An FPGA [black]\'feature\'[/black] refers to the elements on an FPGA, '
    'like a transistor, and smaller features means more can be fit in the same space, which is why you hear the '
    'number growing smaller as newer devices are developed. A higher number of features can imply (though not '
    'always) higher performance and power.',
    '[bold red]Incorrect![/bold red] routing is not often measured and monitored in this way.',
    '[bold red]Incorrect![/bold red] not all FPGA devices are SoCs.',
)
fb14 = [fb14_a, fb14_b, fb14_c, fb14_d]
all_options.append(options14)
all_answers.append(ans14)
all_feedback.append(fb14)
# Question 15: what gets loaded onto the device.
options15 = ['A .tcl script', 'An HDL file', 'An IP block', 'A bitstream']
ans15 = 'A bitstream'
fb15_a, fb15_b, fb15_c, fb15_d = (
    '[bold red]Incorrect![/bold red] -a tcl script is used to rebuild your design, as it includes commands for '
    'Vivado to use.',
    '[bold red]Incorrect![/bold red] - HDL is used when developing the hardware, but is not loaded into the '
    'device.',
    '[bold red]Incorrect![/bold red] - IP are building blocks in your hardware design.',
    # Original interleaved many empty '' fragments; merged here.
    '[bold green]Correct![/bold green] A bitstream is created based on your design, which is what is loaded onto '
    'the device in order for it to function as the designer intends.',
)
fb15 = [fb15_a, fb15_b, fb15_c, fb15_d]
all_options.append(options15)
all_answers.append(ans15)
all_feedback.append(fb15)
# Question 16: FPGA acronym.  Note the trailing spaces in the first two
# options are intentional (kept byte-identical).
options16 = ['First Program Gate Array ', 'First Programmable Gate Array ', 'Field Programmable Gate Array',
             'Field Program Gate Array']
ans16 = 'Field Programmable Gate Array'
fb16_a = fb16_b = fb16_d = '[bold red]Incorrect![/bold red]'
fb16_c = '[bold green]Correct.[/bold green]'
fb16 = [fb16_a, fb16_b, fb16_c, fb16_d]
all_options.append(options16)
all_answers.append(ans16)
all_feedback.append(fb16)
# Question 17: ASIC acronym.
options17 = ['Advanced Speed Integrated Circuit', 'Application Speedy Integrated Circuit',
             'Advanced Standard Integrated Circuit', 'Application Specific Integrated Circuit']
ans17 = 'Application Specific Integrated Circuit'
fb17_a = fb17_b = fb17_c = '[bold red]Incorrect![/bold red]'
fb17_d = '[bold green]Correct[/bold green]'
fb17 = [fb17_a, fb17_b, fb17_c, fb17_d]
all_options.append(options17)
all_answers.append(ans17)
all_feedback.append(fb17)
# Question 18: microprocessor vs microcontroller (continuation literals merged).
options18 = [
    'A microprocessor is a programmable device which takes some input, performs some logical and arithmetic '
    'operations on it and \n produce some desired output. while A microcontroller is a computer which is '
    'typically dedicated to a single task. ',
    'A microcontroller is a programmable device which takes some input, performs some logical and arithmetic '
    'operations on it and produce some desired output. while A microprocessor is a computer which is '
    'typically dedicated to a single task. ',
]
# NOTE(review): ans18 contains '\n ' where the second option does not, so it
# matches neither options18 entry byte-for-byte -- confirm the answer
# matching logic tolerates this (kept as in the original).
ans18 = ('A microcontroller is a programmable device which takes some input, performs some logical and arithmetic '
         'operations on it and \n produce some desired output. while A microprocessor is a computer which is typically '
         'dedicated to a single task. ')
fb18_a = '[bold green]Correct.[/bold green] Microcontroller is like Arduino while Microprocessor is like Rasberry pi'
fb18_b = '[bold red]Incorrect![/bold red]'
fb18 = [fb18_a, fb18_b]
all_options.append(options18)
all_answers.append(ans18)
all_feedback.append(fb18)
# Question 19: identifying board interfaces.
options19 = ['1. Raspberry Pi Interface, 2. Arduino Interfaces', '1. Arduino Interface, 2. Raspberry Pi Interfaces',
             '1.Pmod A interface, 2. Pmod B interfaces', '1.Pmod B interface, 2. Pmod A interfaces']
ans19 = '1. Raspberry Pi Interface, 2. Arduino Interfaces'
fb19_a = '[bold green]Correct[/bold green]'
fb19_b = fb19_c = fb19_d = '[bold red]Incorrect![/bold red]'
fb19 = [fb19_a, fb19_b, fb19_c, fb19_d]
all_options.append(options19)
all_answers.append(ans19)
all_feedback.append(fb19)
# Question 20: board connectors.
options20 = ['1.USB 2. SD Card 3. Ethernet 4. Pmod interfaces',
             '1. MicroUSB 2. MicroSD card 3. Ethernet 4. Raspberry Pi interfaces',
             '1. MicroUSB 2. MicroSD card 3. Ethernet 4. Pmod interfaces',
             '1.USB, 2. SD card 3. Ethernet 4. Arduino Interface']
ans20 = '1. MicroUSB 2. MicroSD card 3. Ethernet 4. Pmod interfaces'
fb20_a = fb20_b = fb20_d = '[bold red]Incorrect![/bold red]'
fb20_c = '[bold green]Correct[/bold green]'
fb20 = [fb20_a, fb20_b, fb20_c, fb20_d]
all_options.append(options20)
all_answers.append(ans20)
all_feedback.append(fb20)
# Platform descriptions shown by the dropdown widget (continuation literals merged).
ard_desc = ('An Arduino comes with a microcontroller. Unlike a Raspberry Pi, it does not have an operating system and '
            'can only run programs that were created and compiled specifically for Arduino boards, mostly written in '
            'C++. They\'re good for low-power applications and driving hardware, and are relatively easy to use, '
            'which has made them popular for robotics projects and teaching.')
rpi_desc = ('Raspberry Pis come with a microprocessor. They\'re pretty powerful, and have a variety of I/O '
            'options--with an RPi, you can use an SD cards, USB devices, HDMI displays, etc. You\'re also free to run '
            'it with almost any programming language.')
fpga_desc = ('FPGA stands for Field Programmable Gate Array. As you saw from the presentation, they are usable only '
             'after they\'ve been configured, creating a device whose architecture is customized for your task. This '
             'makes them efficient, adaptable, and useful for plenty of applications--medical devices, spacecrafts, '
             'self-driving cars, etc. Shown below is an evaluation board, with a ZYNQberry FPGA.')
platform_options = [('Arduino', 0), ('Raspberry Pi', 1), ('FPGA', 2)]
p_val = 0
p_desc = 'Platform'
platform_img_paths = ['images/arduino_uno.jpg', 'images/rpi.jpg', 'images/fpga.jpg']
plat_imgs = []
for img_path in platform_img_paths:
    # with-statement closes each file deterministically; the original left
    # every handle open and shadowed the builtin name `file`.
    with open(img_path, "rb") as fh:
        plat_imgs.append(fh.read())
platform_descs = [ard_desc, rpi_desc, fpga_desc]
# Dropdown widget used (with widgets.interactive below) to pick a platform.
platforms = widgets.Dropdown(options=platform_options, value=p_val, description=p_desc, disabled=False, )
# platform_dropdown (defined below) renders the image and description for the
# current dropdown selection:
#   dd_option  - integer index from the Dropdown
#   img_wgs    - list of raw image bytes, indexed by dd_option
#   desc_list  - list of description strings, indexed by dd_option
def platform_dropdown(dd_option, img_wgs, desc_list):
    """Render the description and picture for the selected platform.

    dd_option: integer index from the Dropdown widget.
    img_wgs: list of raw image bytes, indexed by dd_option.
    desc_list: list of description strings, indexed by dd_option.
    """
    selected_desc = desc_list[dd_option]
    selected_img = widgets.Image(value=img_wgs[dd_option], format='png',
                                 width=300, align='center')
    # Description first, then the image (matches notebook layout).
    display(Markdown('\n' + selected_desc))
    display(selected_img)
def pop_platforms():
    """Return an interactive widget wiring the platform dropdown to its renderer."""
    return widgets.interactive(platform_dropdown,
                               dd_option=platforms,
                               img_wgs=fixed(plat_imgs),
                               desc_list=fixed(platform_descs))
# Labeled PYNQ board pictures used by the labeling question.
pynq_imgs_path = ['images/PynqLabel1.JPG', 'images/PynqLabel2.JPG']
pynq_imgs = []
for img_path in pynq_imgs_path:
    # with-statement closes each file deterministically; the original left
    # the handles open and shadowed the builtin name `file`.
    with open(img_path, "rb") as fh:
        pynq_imgs.append(fh.read())
def pynq_label_question(img_option, imgs):
    """Display the labeled PYNQ board image selected by img_option."""
    widget = widgets.Image(value=imgs[img_option], format='png',
                           width=300, align='center')
    display(widget)
def pop_images(image_choice):
    """Return an interactive widget showing the fixed PYNQ label image image_choice."""
    return widgets.interactive(pynq_label_question,
                               img_option=fixed(image_choice),
                               imgs=fixed(pynq_imgs))
def populate_questions():
question_list = []
for i in range(len(all_options)):
question = [(q_descriptions[i]), show_buttons(all_options[i], all_answers[i], all_feedback[i])]
if i == 5:
platforms | |
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flexflow.core.flexflow_type import ActiMode, AggrMode, PoolType, DataType, LossType, MetricsType, OpType, str_to_enum, int_to_enum
import flexflow.torch.fx as fx
class FXTensor(object):
    """Thin wrapper holding a FlexFlow tensor while the op graph is built."""

    def __init__(self, fftensor):
        # Plain attribute assignment (stray trailing semicolon removed).
        self.fftensor = fftensor
class PyTorchModel(object):
def __init__(self, filename=None, model=None):
    """Initialize the model description from a file or an in-memory model.

    filename: path to a serialized op-list file (handled by _init_from_file).
    model: live model object (handled by _init_from_model).
    With neither argument the object is left empty.
    """
    self.tensor_dict = {}        # op name -> FXTensor produced so far
    self.lines = None            # raw op-description lines
    self.input_ops_list = None
    self.output_ops_list = None
    # Identity checks replace the non-idiomatic `!= None` comparisons.
    if filename is not None:
        self._init_from_file(filename)
    elif model is not None:
        self._init_from_model(model)
def apply(self, ffmodel, input_tensors):
output_tensors = []
input_idx = 0
for line in self.lines:
items = line.strip().split(",")
assert len(items) >= 3, "wrong format"
items = [i.strip() for i in items]
print(items)
#get op name
op_name = items[0]
#get input ops' name
self.input_ops_list = items[1].split(":")
self.input_ops_list = [i.strip() for i in self.input_ops_list]
for i in self.input_ops_list:
if i == "":
self.input_ops_list.remove(i)
#get output ops' name
self.output_ops_list = items[2].split(":")
self.output_ops_list = [i.strip() for i in self.output_ops_list]
for i in self.output_ops_list:
if i == "":
self.output_ops_list.remove(i)
#get op type
op_type = str_to_enum(OpType, items[3])
if op_type == OpType.INPUT:
assert len(self.input_ops_list) == 0, "wrong format"
output = input_tensors[input_idx]
output = FXTensor(output)
input_idx += 1
elif op_type == OpType.LINEAR:
assert len(items) == 7, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
od = int(items[4])
activ = int_to_enum(ActiMode, int(items[5]))
bias = bool(int(items[6]))
output = ffmodel.dense(input=input_tensor, out_dim=od, activation=activ, use_bias=bias, name=op_name)
output = FXTensor(output)
elif op_type == OpType.CONV2D:
assert len(items) == 14, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
oc = int(items[4])
kh = int(items[5])
kw = int(items[6])
sh = int(items[7])
sw = int(items[8])
ph = int(items[9])
pw = int(items[10])
activ = int_to_enum(ActiMode, int(items[11]))
group = int(items[12])
bias = bool(int(items[13]))
output = ffmodel.conv2d(input=input_tensor, out_channels=oc, kernel_h=kh, kernel_w=kw, stride_h=sh, stride_w=sw, padding_h=ph, padding_w=pw, activation=activ, groups=group, use_bias=bias, name=op_name)
output = FXTensor(output)
elif op_type == OpType.POOL2D:
assert len(items) == 9, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
kh = int(items[4])
sh = int(items[5])
ph = int(items[6])
pt = int_to_enum(PoolType, int(items[7]))
activ = int_to_enum(ActiMode, int(items[8]))
output = ffmodel.pool2d(input=input_tensor, kernel_h=kh, kernel_w=kh, stride_h=sh, stride_w=sh, padding_h=ph, padding_w=ph, pool_type=pt, activation=activ, name=op_name)
output = FXTensor(output)
elif op_type == OpType.DROPOUT:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
r = float(items[4])
output = ffmodel.dropout(input=input_tensor, rate=r, seed=0, name=op_name)
output = FXTensor(output)
elif op_type == OpType.FLAT:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.flat(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.SCALAR_MULTIPLY:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.scalar_multiply(input=input_tensor, scalar=float(items[4]), name=op_name)
output = FXTensor(output)
elif op_type == OpType.SCALAR_FLOORDIV:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
if type(input_tensor) is float or type(input_tensor) is int:
output = input_tensor // float(items[4])
else:
assert 0, "Tensor floor division is not supported."
output = FXTensor(output)
elif op_type == OpType.SCALAR_ADD:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.scalar_add(input=input_tensor, scalar=float(items[4]), name=op_name)
output = FXTensor(output)
elif op_type == OpType.SCALAR_SUB:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.scalar_sub(input=input_tensor, scalar=float(items[4]), name=op_name)
output = FXTensor(output)
elif op_type == OpType.SCALAR_TRUEDIV:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.scalar_true_divide(input=input_tensor, scalar=float(items[4]), name=op_name)
output = FXTensor(output)
elif op_type == OpType.RELU:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.relu(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.GELU:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.gelu(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.IDENTITY:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.identity(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.LAYER_NORM:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.identity(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.EXPAND:
assert len(items) >= 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.identity(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.TRANSPOSE:
assert len(items) >= 6
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
perm = list(range(1,len(input_tensor.dims)+1))
a,b = int(items[4]),int(items[5])
perm[a-1],perm[b-1] = perm[b-1],perm[a-1]
output = ffmodel.transpose(input=input_tensor,perm=perm,name=op_name)
output = FXTensor(output)
elif op_type == OpType.PERMUTE:
assert len(items) > 4
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
perm = [int(dim) for dim in items[4:]]
output = ffmodel.transpose(input=input_tensor,perm=perm,name=op_name)
output = FXTensor(output)
elif op_type == OpType.RESHAPE:
assert len(items) >= 5
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
shape = items[4:]
for idx,dim in enumerate(shape):
try:
shape[idx] = int(dim)
except:
shape[idx] = self.tensor_dict[dim+op_name].fftensor
output = ffmodel.reshape(input=input_tensor,shape=shape,name=op_name)
output = FXTensor(output)
elif op_type == OpType.BATCH_MATMUL:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 2, "wrong format"
input_tensor1 = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
input_tensor2 = self.tensor_dict[self._get_input_key(op_name, 1)].fftensor
output = ffmodel.batch_matmul(A=input_tensor1, B=input_tensor2, name=op_name)
output = FXTensor(output)
elif op_type == OpType.SIGMOID:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.sigmoid(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.TANH:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.tanh(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.ELU:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.elu(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.SOFTMAX:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.softmax(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.CONCAT:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) >= 2, "wrong format"
input_tensors = []
for i in range(0, len(self.input_ops_list)):
input_tensors.append(self.tensor_dict[self._get_input_key(op_name, i)].fftensor)
ax = int(items[4])
output = ffmodel.concat(tensors=input_tensors, axis=ax, name=op_name)
output = FXTensor(output)
elif op_type == OpType.SPLIT:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
size = len(self.output_ops_list)
assert size >= 2, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
ax = int(items[4])
output = ffmodel.split(input=input_tensor, sizes=size, axis=ax, name=op_name)
assert type(output) == list
output = FXTensor(output)
elif op_type == OpType.GETITEM:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
assert type(input_tensor) == list or type(input_tensor) == tuple
idx = int(items[4])
output = input_tensor[idx]
output = FXTensor(output)
elif op_type == OpType.GETATTR:
assert len(items) == 5, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
if(items[4] == "shape"):
output = input_tensor.dims
else:
output = getattr(input_tensor, items[4])
output = FXTensor(output)
elif op_type == OpType.BATCH_NORM:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 1, "wrong format"
input_tensor = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
output = ffmodel.batch_norm(input=input_tensor, name=op_name)
output = FXTensor(output)
elif op_type == OpType.ADD:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 2, "wrong format"
input_tensor1 = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
input_tensor2 = self.tensor_dict[self._get_input_key(op_name, 1)].fftensor
output = ffmodel.add(x=input_tensor1, y=input_tensor2, name=op_name)
output = FXTensor(output)
elif op_type == OpType.MULTIPLY:
assert len(items) == 4, "wrong format"
assert len(self.input_ops_list) == 2, "wrong format"
input_tensor1 = self.tensor_dict[self._get_input_key(op_name, 0)].fftensor
input_tensor2 = self.tensor_dict[self._get_input_key(op_name, 1)].fftensor
output = ffmodel.multiply(x=input_tensor1, y=input_tensor2, name=op_name)
output = FXTensor(output)
elif op_type == OpType.OUTPUT:
assert len(self.input_ops_list) >= 1, "wrong format"
for i in range(0, len(self.input_ops_list)):
output_tensors.append(self.tensor_dict[self._get_input_key(op_name, i)].fftensor)
output = None
| |
# Calculate the time difference between two snapshots
time1 = self.get_dataset_from_snapshot(snap_idx1)
time2 = self.get_dataset_from_snapshot(snap_idx2)
return time2 - time1
class FullData(UIOData):
    """Data of a full 3D model snapshot.

    Exposes the coordinate axes, a 2D plotting grid, optional optical-depth
    information (via add_tau) and heatmap plotting helpers.
    """

    def __init__(self, model_path: str, eos_file='') -> None:
        super().__init__(model_path, eos_file)
        # Cell-centre coordinate axes of the model
        self.x = self['xc1']
        self.y = self['xc2']
        self.z = self['xc3']
        # Standard x-z grid
        self.set_grid(*initialise_grid(self.x, self.y, self.z, 'xz'))

    def add_tau(self, mean_data, gravity: float):
        """Attach an optical-depth scale computed from a mean-data object.

        'mean_data: MeanData' should correspond to the same model snapshot
        as the full file; needs to be passed in separately since it contains
        quantities used to calculate optical depth.
        'gravity' is defined on a linear scale.
        """
        mean_data.add_qlmean_quantities(gravity)
        self.tau = mean_data.tau

    def quantity_at_tau_val(self, quantity: str, tau_val: float):
        """Interpolate 'quantity' onto optical depth 'tau_val' (linear scale).

        Requires self.tau, i.e. add_tau() must have been called first.
        """
        return interp1d(self.tau, self[quantity])(tau_val)

    def z_zero_point(self, kind='tau', tau_val=1):
        """Return the 'z' zero point.

        'kind' is one of 'tau' (set based on optical depth, requires a
        'tau_val') or 'bottom' (sets the bottom of the grid to be the zero
        point; returns None so the caller leaves 'z' unchanged).
        Raises KeyError for an unknown 'kind'.
        """
        kinds = {
            'tau': lambda x: self.quantity_at_tau_val('xc3', x),
            'bottom': lambda x: None
        }
        return kinds[kind](tau_val)

    # -------------------------------------------------------------------------
    # Setters
    # -------------------------------------------------------------------------
    def set_grid(self, X_grid: np.ndarray, Y_grid: np.ndarray):
        """Store the 2D coordinate grids used by the plotting methods."""
        self.X_grid = X_grid
        self.Y_grid = Y_grid

    def set_z_zero_point(self, zero_point=None):
        """Shift 'z' so that 'zero_point' becomes 0 (tau=1 height by default).

        BUGFIX: compare against None instead of truthiness -- a legitimate
        zero point of exactly 0.0 is falsy and used to be recomputed.
        """
        if zero_point is None:
            # Use tau=1 as standard
            zero_point = self.z_zero_point()
        self.z = self['z'] - zero_point

    # -----------------------------------------------------------------------
    # Plotting methods
    # -----------------------------------------------------------------------
    def plot_heatmap(self, ax, plot_values, log_quantity=False, title=None,
                     plot_type='image', add_cbar=True, cbar=None,
                     cmap='jet', cbar_label=None, cbar_label_pos='left',
                     origin='lower',
                     vlimits=None):
        """Plot 'plot_values' on 'ax' using the stored X_/Y_grids.

        plot_type : 'mesh' | 'contour' | 'image'; anything else falls back
            to a pcolormesh with a warning.
        vlimits : optional (vmin, vmax) tuple; otherwise taken from data.
        cbar : pass an existing colorbar to reuse it; otherwise one is
            created when add_cbar is True.
        Returns the colorbar (None when add_cbar is False and no cbar given).
        """
        # Normalise data to range [0, 1] before applying colours
        if log_quantity:
            plot_values = np.log10(plot_values)
        if vlimits:  # should be a tuple of (vmin, vmax)
            vmin, vmax = vlimits
        else:  # determine min,max from plot values
            vmin, vmax = plot_values.min(), plot_values.max()
        norm = Normalize(vmin=vmin, vmax=vmax)
        # Plot a heatmap using X_, Y_grids and a Z datacube 'plot_values'
        if plot_type == 'mesh':
            im = ax.pcolormesh(self.X_grid, self.Y_grid,
                               plot_values, cmap=cmap, norm=norm)
        elif plot_type == 'contour':
            im = ax.contourf(self.X_grid, self.Y_grid,
                             plot_values, cmap=cmap, norm=norm)
        elif plot_type == 'image':
            x_limits = [self.X_grid[0][0], self.X_grid[0][-1]]
            y_limits = [self.Y_grid[0][0], self.Y_grid[-1][0]]
            extent = (x_limits[0], x_limits[1], y_limits[0], y_limits[1])
            im = ax.imshow(plot_values, interpolation='bilinear', origin=origin,
                           cmap=cmap, norm=norm, extent=extent)
        else:
            print(f"Warning: Plot type {plot_type} is not valid. Valid choices are 'mesh', 'contour' and 'image'.")
            print("Defaulting to 'mesh'.")
            # BUGFIX: honour 'cmap' and 'norm' like the other branches, so
            # the colorbar ticks below match the plotted colours.
            im = ax.pcolormesh(self.X_grid, self.Y_grid, plot_values,
                               cmap=cmap, norm=norm)
        # Set number of ticks
        ax.xaxis.set_major_locator(MaxNLocator(5))
        ax.yaxis.set_major_locator(MaxNLocator(5))
        if add_cbar:
            # colorbar same height as heatmap
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            # Create new colorbar
            if cbar is None:
                cbar = ax.figure.colorbar(im, ax=ax, cax=cax)
            # Set default colorbar ticks
            ticks = np.linspace(vmin, vmax, num=5)
            cbar.set_ticks(ticks)
            cbar.ax.set_yticklabels([f"{t:.2f}" for t in ticks])
            if cbar_label:
                cbar_label_positions = {
                    'top': cbar.ax.set_title,
                    'right': cbar.set_label,
                    'bottom': cbar.ax.set_xlabel,
                }
                if cbar_label_pos in cbar_label_positions.keys():
                    cbar_label_positions[cbar_label_pos](cbar_label)
                else:
                    print(f"Error: {cbar_label_pos} is not a valid choice."
                          "'top', 'right' and 'bottom' are valid choices. Using"
                          "'right'")
                    cbar_label_positions['right'](cbar_label)
        if title:
            ax.set_title(title)
        return cbar

    def plot_quantity_heatmap(self, ax: plt.Axes, key: str,
                              plane='xz', set_z_zero_to_tau_1=False,
                              title=None, cmap='jet', log_quantity=False,
                              xlabel=None, ylabel=None,
                              auto_label_axes=False,
                              origin='lower',
                              add_cbar=True, cbar_label=None,
                              cbar_label_pos='right',
                              average_snaps=False):
        """Average 'key' onto 'plane' and draw it as a heatmap on 'ax'.

        Axis labels are determined by 'plane' when auto_label_axes is True.
        Returns the colorbar, or None when the data could not be computed.
        """
        # Set 'min-max' bounds for a key if they have not yet been set
        set_min_max = key not in self.min_max_dict
        # Get data from box and set 'min-max' bounds for the key
        if average_snaps:
            # Average over all snapshots
            data = self.average_quantity_over_snapshots(
                key, set_min_max=set_min_max)
        else:
            data = self[key]
            if set_min_max:
                self.min_max_quantity_over_snapshots(key)
        # Average over axis not in plane
        if data is not None:
            if len(data.shape) == 3:
                # For now, assume IDL indexing since 'data' has not been converted to
                # Python indexing
                print(f"Averaging data with key {key} in {plane} plane.")
                avg_data = average_data_to_plane(data, plane, is_idl_idx=True)
            else:
                print(
                    f"Data with key {key} has shape {data.shape}, skipping plane average.")
                avg_data = data
        else:
            print(f"Error: Could not compute data for key {key}.")
            return None
        # Plot heatmap of data
        # Make grid points for plotting plane
        if set_z_zero_to_tau_1:
            print("Setting z zero point")
            self.set_z_zero_point()
        self.set_grid(*initialise_grid(self.x, self.y, self.z, plane))
        cbar = self.plot_heatmap(ax, avg_data, log_quantity=log_quantity,
                                 plot_type='image', origin=origin,
                                 title=title, cmap=cmap, add_cbar=add_cbar,
                                 cbar_label=cbar_label,
                                 cbar_label_pos=cbar_label_pos)
        # Set labels
        if auto_label_axes:
            xlabel, ylabel = [f"{item} [km]" for item in list(plane)]
        if xlabel is not None:
            ax.set_xlabel(xlabel)
        if ylabel is not None:
            ax.set_ylabel(ylabel)
        return cbar
class MeanData(UIOData):
    def __init__(self, model_path: str, eos_file='', gravity=None) -> None:
        """Horizontally averaged (mean-file) model data.

        gravity : float, optional
            Surface gravity on a linear scale. If given, tau and related
            quantities are computed immediately via add_qlmean_quantities().
        """
        # gravity='infer' : infer from model name
        # NOTE(review): the 'infer' option does not appear to be handled in
        # this method -- a string here would be passed straight through.
        super().__init__(model_path, eos_file)
        self.z = self.get_z()
        if gravity:  # defined on linear scale
            self.add_qlmean_quantities(gravity)
    def add_qlmean_quantities(self, gravity: float):
        """Compute the optical-depth scale and radiative flux from box 2.

        'gravity' defined on linear scale.
        Sets self.frad and self.tau as side effects.
        """
        self.set_box(2)
        # z vector in the file's native units (convert_km=False)
        z = self.get_x_vectors(convert_km=False)[2].squeeze()
        xcm = self['kapparho_xmean']
        # Boundary value for tau from the last grid point; presumably the
        # hydrostatic estimate (kappa/rho) * p / g -- TODO confirm which end
        # of the grid index -1 corresponds to.
        tau0 = xcm[-1] / self['rho_xmean'][-1] * self['p_xmean'][-1] / gravity
        # # Reverse 'xcm' to be monotonically decreasing to get correct integral
        interp = PchipInterpolator(z, xcm[::-1])
        # Integrate kappa-rho over z to get tau
        # self.tau = cumtrapz(self['kapparho_xmean'], z)
        self.frad = self['ferb_xmean']
        self.tau = tau0 + interp.antiderivative()(z)
def quantity_at_tau_val(self, quantity: str, tau_val: float):
# 'tau_val' on linear scale
return interp1d(self.tau, self[quantity])(tau_val)
def z_zero_point(self, kind='tau', tau_val=1):
# set the 'z' zero point
# 'kind' is one of 'tau' (set based on optical depth,
# requires a 'tau_val')
# or 'bottom' (sets the bottom of the grid to be the zero point)
kinds = {
'tau': lambda x: self.quantity_at_tau_val('xc3', x),
'bottom': lambda x: None
}
zero_point = kinds[kind](tau_val)
return zero_point
def set_z_zero_point(self, zero_point=None):
# Set the 'z' zero point
if not zero_point:
# Use tau=1 as standard
zero_point = self.z_zero_point()
self.z -= zero_point
    def first_snapshot(self, box_idx=2):
        # Jump to snapshot index 0.
        # NOTE(review): uses a hard-coded 0 rather than self.first_snap_idx
        # (which prev_snapshot clamps against) -- confirm these always agree.
        self.update_snapshot(0, box_idx)
    def final_snapshot(self, box_idx=2):
        # Jump to the last available snapshot.
        self.update_snapshot(self.final_snap_idx, box_idx)
def prev_snapshot(self, box_idx=2):
# Look for previous model and make it the current model
potential_idx = self.snap_idx - 1
if potential_idx < self.first_snap_idx:
potential_idx = self.first_snap_idx
self.update_snapshot(potential_idx, box_idx)
def next_snapshot(self, box_idx=2):
# Look for next model and make it the current model
potential_idx = self.snap_idx + 1
if potential_idx >= self.final_snap_idx:
potential_idx = self.final_snap_idx
self.update_snapshot(potential_idx, box_idx)
def update_snapshot(self, snap_idx: int, box_idx=2):
# Update 'snap_idx', 'dataset' and 'box' properties
self.snap_idx = snap_idx
self.dataset = self.model.dataset[self.snap_idx]
self.box = self.dataset.box[box_idx] # '2' is z3, standard
self.box_keys = [key for key in self.box.keys()]
    def set_box(self, box_idx: int):
        # Select a different box of the current dataset without touching
        # snap_idx or the cached box_keys (unlike update_snapshot).
        self.box = self.dataset.box[box_idx]
def __getitem__(self, key: str) -> np.ndarray:
# Get quantity from current model's current snapshot's boxes
data = None
opts = ['+', '-', '*', '/', '^']
if any(opt in key for opt in opts):
# Split key by operators
pattern = r'|'.join([f"\{opt}" for opt in opts])
opt_keys = [o_key.strip() for o_key in re.split(pattern, key)]
# Replace expressions in 'key'
# Warning: if the 'opt_key' is present more than once, it breaks!
for opt_key in opt_keys:
key = key.replace(opt_key, f"self['{opt_key}']")
# Evaluate key
data = eval(key)
else:
# Derived quantities
if key.lower() == 'kinetic energy':
# Calculate kinetic energy with density and velocities
density = self.box['rho_xmean'].data
v1, v2, v3 = self.get_velocity_vectors(self.box)
# Full 3D kinetic energy
data = calculate_kinetic_energy(density, v1, v2, v3)
elif key.lower() == 'momentum':
# Calculate momentum with density and velocities
density = self.box['rho_xmean'].data
v1, v2, v3 = self.get_velocity_vectors(self.box)
# Full 3D momentum
data = calculate_momentum(density, v1, v2, v3)
# # EOS quantities # Don't use for mean files!
# eos_quantities = ['temperature', 'pressure', 'entropy']
# eos_quantities.extend(
# [f"{quantity}_xmean" for quantity in eos_quantities])
# if key.lower() in eos_quantities:
# rho, ei = self.box['rho_xmean'].data, self.box['ei_xmean'].data
# data = self.eos.STP(rho, ei, quantity=key)
# Box quantities
else:
data = self.box[key].data
return data.squeeze()
    def min_max_quantity_over_snapshots(self, key: str):
        """Get min & max of 'key' from 'self.box' across all snapshots.

        Side effect: the result is also cached in self.min_max_dict[key].
        The current snapshot is restored before returning.
        """
        snap_idx = self.snap_idx  # store reference before iterating
        self.first_snapshot()  # load first snapshot
        model_min, model_max = self.get_min_max_quantity(key)
        # Loop over snapshots
        # NOTE(review): iterates final_snap_idx + 1 times starting from
        # snapshot 0; if first_snap_idx != 0 the visited range and count look
        # off by one -- verify against the model's snapshot indexing.
        for i in range(self.final_snap_idx + 1):
            data = self[key]
            min_data, max_data = np.min(data), np.max(data)
            if min_data < model_min:
                model_min = min_data
            if max_data > model_max:
                model_max = max_data
            self.next_snapshot()
        self.update_snapshot(snap_idx)  # revert to original snapshot
        # Set dict and return? Is this confusing because of the side effect?
        self.min_max_dict[key] = (model_min, model_max)
        return model_min, model_max
def get_min_max_quantity(self, key):
min_quantity, max_quantity = np.min(
self[key]), np.max(self[key])
return min_quantity, max_quantity
def get_z(self, unit='km'):
# Get the 'z' direction from the box, assuming | |
import networkx as nx
import numpy as np
from copy import deepcopy
from collections import defaultdict
from ylearn.utils import to_repr
from . import prob
from .utils import (check_nodes, ancestors_of_iter, descendents_of_iter)
class CausalGraph:
"""
A class for representing DAGs of causal structures.
Attributes
----------
causation : dict
Descriptions of the causal structures where values are parents of the
corresponding keys.
dag : nx.MultiDiGraph
Graph represented by the networkx package.
prob : ylearn.causal_model.prob.Prob
The encoded probability distribution of the causal graph.
latent_confounding_arcs : list of tuple of two str
Two elements in the tuple are names of nodes in the graph where there
exists an latent confounding arcs between them. Semi-Markovian graphs
with unobserved confounders can be converted to a graph without
unobserved variables, where one can add bi-directed latent confounding
arcs represent these relations. For example, the causal graph X <- U -> Y,
where U is an unobserved confounder of X and Y, can be converted
equivalently to X <-->Y where <--> is a latent confounding arc.
is_dag : bool
Determine whether the graph is a DAG, which is a necessary condition
for it to be a valid causal graph.
c_components : set
The C-components of the graph.
observed_dag : nx.MultiDiGraph
A causal graph with only observed variables.
topo_order : list
The topological order of the graph.
explicit_unob_var_dag : nx.MultiDiGraph
A new dag where all unobserved confounding arcs are replaced
by explicit unobserved variables. See latent_confounding_arcs for more
details of the unobserved variables.
Methods
----------
    to_adj_matrix()
        Return the numpy matrix of the adjacency matrix.
    to_adj_list()
        Return the numpy array of the adjacency matrix.
ancestors(y)
Return ancestors of y.
add_nodes(nodes, new=False)
If not new, add all nodes in the nodes to the current
CausalGraph, else create a new graph and add nodes.
add_edges_from(edge_list, new=False, observed=True)
Add all edges in the edge_list to the CausalGraph.
parents(x, observed=True)
Find the parents of the node x in the CausalGraph.
add_edge(i, j, observed=True)
Add an edge between nodes i and j to the CausalGraph. Add an unobserved
confounding arc if not observed.
remove_nodes(nodes, new=False)
Remove all nodes in the graph. If new, do this in a new CausalGraph.
remove_edge(i, j, observed=True)
Remove the edge in the CausalGraph. If observed, remove the unobserved
latent confounding arcs.
remove_edges_from(edge_list, new=False, observed=True)
Remove all edges in the edge_list in the CausalGraph.
build_sub_graph(subset)
Return a new CausalGraph as the subgraph of self with nodes in the
subset.
remove_incoming_edges(y, new=False)
Remove all incoming edges of all nodes in y. If new, return a new
CausalGraph.
remove_outgoing_edges(y, new=False)
Remove all outgoing edges of all nodes in y. If new, return a new
CausalGraph.
"""
    def __init__(self, causation, dag=None, latent_confounding_arcs=None):
        """
        Parameters
        ----------
        causation : dict
            Descriptions of the causal structures where values are parents of the
            corresponding keys.
        dag : nx.MultiGraph, optional
            A known graph structure. If provided, dag must represent
            the causal structures stored in causation. Defaults to None.
        latent_confounding_arcs : set or list of tuple of two str, optional
            Two elements in the tuple are names of nodes in the graph where there
            exists an latent confounding arcs between them. Semi-Markovian graphs
            with unobserved confounders can be converted to a graph without
            unobserved variables, where one can add bi-directed latent confounding
            arcs to represent these relations. For example, the causal graph X <- U -> Y,
            where U is an unobserved confounder of X and Y, can be converted
            equivalently to X <-->Y where <--> is a latent confounding arc.
        """
        self.causation = defaultdict(list, causation)
        # NOTE: this is a live view of the dict's keys, not a snapshot -- it
        # tracks later additions to self.causation.
        self.ava_nodes = self.causation.keys()
        # observed_dag rebuilds a fresh graph from causation on each access
        self.dag = self.observed_dag.copy() if dag is None else dag
        # add unobserved bidirected confounding arcs to the graph, with the
        # letter 'n' representing that the edge is unobserved
        if latent_confounding_arcs is not None:
            for edge in latent_confounding_arcs:
                self.dag.add_edges_from(
                    [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
                )
@property
def prob(self):
"""The encoded probability distribution.
Returns
----------
Prob
"""
return prob.Prob(variables=set(self.causation.keys()))
@property
def latent_confounding_arcs(self):
"""Return the latent confounding arcs encoded in the graph.
Returns
----------
list
"""
W = nx.to_numpy_matrix(self.dag)
a, w_t = np.where(W >= 1), W.T.A
arcs, nodes = [], list(self.dag.nodes)
for row, col in zip(a[0], a[1]):
if w_t[row][col] >= 1 and (nodes[col], nodes[row]) not in arcs:
arcs.append((nodes[row], nodes[col]))
return arcs
    @property
    def is_dag(self):
        """Verify whether the constructed graph is a DAG.

        Only the observed part of the graph is checked; the bidirected
        latent confounding arcs stored in self.dag are ignored here.
        """
        # TODO: determine if the graph is a DAG, try tr(e^{W\circledot W}-d)=0
        return nx.is_directed_acyclic_graph(self.observed_dag)
    def to_adj_matrix(self):
        """Return the adjacency matrix of self.dag.

        NOTE(review): nx.to_numpy_matrix was removed in networkx 3.0 and
        would need nx.to_numpy_array there; left unchanged because callers
        may rely on the np.matrix return type.
        """
        W = nx.to_numpy_matrix(self.dag)
        return W
# def to_adj_list(self):
# """Return the adjacency list."""
# pass
    def is_d_separated(self, x, y, test_set):
        """Check if test_set d-separates x and y.

        The test runs on the graph with latent confounders made explicit
        (explicit_unob_var_dag), so unobserved confounding is accounted for.

        NOTE(review): nx.d_separated is deprecated in recent networkx
        releases (renamed is_d_separator) -- confirm the pinned version
        before upgrading.

        Parameters
        ----------
        x : set of str
        y : set of str
        test_set : set of str

        Returns
        ----------
        Bool
            If test_set d-separates x and y, return True else return False.
        """
        return nx.d_separated(self.explicit_unob_var_dag, x, y, test_set)
    @property
    def c_components(self):
        """Return the C-components of the graph.

        Nodes linked by latent confounding arcs end up in one component;
        every node of self.dag appears in exactly one component.

        Returns
        ----------
        generator of set of str
            The C-components of the graph (note: nx.connected_components
            yields a generator, not a set as the class docstring says).
        """
        bi_directed_graph = nx.Graph()
        bi_directed_graph.add_nodes_from(self.dag.nodes)
        bi_directed_graph.add_edges_from(self.latent_confounding_arcs)
        return nx.connected_components(bi_directed_graph)
def ancestors(self, x):
"""Return the ancestors of all nodes in x.
Parameters
----------
x : set of str
a set of nodes in the graph
Returns
----------
set of str
Ancestors of nodes in x in the graph
"""
g = self.observed_dag
return ancestors_of_iter(g, x)
def descendents(self, x):
"""Return the descendents of all nodes in x.
Parameters
----------
x : set of str
a set of nodes in the graph
Returns
----------
set of str
Descendents of nodes x of the graph
"""
# des = set()
# x = {x} if isinstance(x, str) else x
# for node in x:
# des.add(node)
# try:
# des.update(nx.descendants(self.observed_dag, node))
# except Exception:
# pass
g = self.observed_dag
return descendents_of_iter(g, x)
def parents(self, x, only_observed=True):
"""Return the direct parents of the node x in the graph.
Parameters
----------
x : str
Name of the node x.
only_observed : bool, optional
If True, then only find the observed parents in the causal graph,
otherwise also include the unobserved variables, by default True
Returns
-------
list
Parents of the node x in the graph
"""
if only_observed:
return self.causation[x]
else:
return list(self.explicit_unob_var_dag.predecessors(x))
@property
def observed_dag(self):
"""Return the observed part of the graph, including observed nodes and
edges between them.
Returns
----------
nx.MultiDiGraph
The observed part of the graph
"""
edges = []
for k, v in self.causation.items():
for para in v:
edges.append((para, k, 0))
ob_dag = nx.MultiDiGraph()
ob_dag.add_edges_from(edges)
return ob_dag
    @property
    def explicit_unob_var_dag(self):
        """Build a new dag where all unobserved confounding arcs are replaced
        by explicit unobserved variables.

        Each latent arc (node1, node2) becomes a fresh confounder 'U{i}' with
        edges U{i} -> node1 and U{i} -> node2 (edge key 'n' marks them as
        unobserved).

        Returns
        ----------
        nx.MultiDiGraph
        """
        # observed_dag builds a fresh graph on every access, so mutating it
        # here does not modify self.dag
        new_dag = self.observed_dag
        for i, (node1, node2) in enumerate(self.latent_confounding_arcs):
            new_dag.add_edges_from(
                [(f'U{i}', node1, 'n'), (f'U{i}', node2, 'n')]
            )
        return new_dag
    @property
    def topo_order(self):
        """Return the topological order of the nodes in the observed graph

        Returns
        ----------
        generator
            Nodes in the topological order
        """
        return nx.topological_sort(self.observed_dag)
def add_nodes(self, nodes, new=False):
"""
If not new, add all nodes in the nodes to the current
CausalGraph, else create a new graph and add nodes.
Parameters
----------
nodes : set or list
new : bool, optional
If new create and return a new graph. Defaults to False.
Returns
----------
CausalGraph
"""
ori_nodes = self.dag.nodes
if not new:
self.dag.add_nodes_from(nodes)
for node in nodes:
if node not in ori_nodes:
self.causation[node] = []
else:
new_dag = deepcopy(self.dag)
new_causation = deepcopy(self.causation)
new_dag.add_nodes_from(nodes)
for node in nodes:
if node not in ori_nodes:
new_causation[node] = []
return CausalGraph(new_causation, dag=new_dag)
def add_edges_from(self, edge_list, new=False, observed=True):
"""
Add edges to the causal graph.
Parameters
----------
edge_list : list
Every element of the list contains two elements, the first for
the parent
new : bool
Return a new graph if set as True
| |
callback once buffer is filled for the first
# time
if packetCounter == BUFFER:
startFlag = 1
# if audioBuffer is getting way too long, chop it back, the
# treshold is two times the normal size
if len(audioBuffer) > 2*CHUNK*BUFFER*2:
del audioBuffer[0:2*CHUNK*BUFFER]
overFlowFlag += 1
# display state
if packetCounter % 250 == 0:
print('Chunk no ', packetCounter, 'received successfully')
print('Current buffer size: '+str(len(audioBuffer)))
# append timePacket and packetNumber lists
packetListClient.append(packetNumber)
timeListClient.append(float(timePacket[0]))
# write audio part to file
fIn1.write(data)
# end messages
messagesOutput(packetCounter, timeListServer, timeListClient,
packetListClient, overFlowFlag)
# cleanup
cleanupOutput()
return
#%% Function to display closing stats and messages for output
def messagesOutput(packetCounter, timeListServer, timeListClient,
                   packetListClient, overFlowFlag):
    """Print closing stats for the audio output side and save timestamps.

    Relies on the module-level 'underFlowFlag' counter for underflow stats.
    Writes one CSV row per packet: (packet no, client ts, server ts, diff).
    """
    # summary message
    print('\n\n'+str(packetCounter) +
          ' chunks received, time taken for all chunks: ' +
          str(timeListServer[-1]-timeListServer[0]))
    # more diagnostic messages
    print('\nReceived '+str(packetCounter)+' audio chunks')
    # underflow events
    print('\nBuffer underflow occured '+str(underFlowFlag)+' times')
    # overflow events
    print('\nBuffer overflow occured '+str(overFlowFlag)+' times')
    # print average transmission time
    timeListDiff = list(map(sub, timeListServer, timeListClient))
    print('\nAverage difference between client and server side timestamps: ',
          sum(timeListDiff) / len(timeListDiff), ' secs \n\nClient timestamp '
          'is taken after reading audio input buffer \nServer timestamp is '
          'taken when pushing the received data into audio output buffer\n\n')
    # Saving data
    # write out timestamps into a csv file
    # BUGFIX: csv.writer needs a text-mode file in Python 3 ('wb' raised
    # TypeError); newline='' avoids blank lines on Windows. The 'with'
    # block also guarantees the file is closed on error.
    with open('timestamps.csv', 'w', newline='') as output:
        writer = csv.writer(output)
        for packet, client, server, diff in zip(packetListClient, timeListClient,
                                                timeListServer, timeListDiff):
            writer.writerow((packet, client, server, diff))
    return
#%% Function to integrate the pieces and run the whole shitstorm
def goGo(NAT, STUN, ROLE, TRIALTYPE, WHICHSEED, LOGTTL):
# these need to be global for callbacks
global fIn1, fIn2, fOut, PortIn, PortOut, PortComm
# load all settings, magic numbers
[punchTimeout, CHANNELS, RATE, FORMAT, savefileOut,
savefileIn1, savefileIn2, savefileLog, savefileTTL, portIn, portOut,
PortIn, PortOut, PortComm, serialSettings, turntime, timeDiff, instText1,
instText2, instText3, turnText1, turnText2, retellText1a, retellText1b,
retellText2a, retellText2b, retellText2c,
turnN, turnCurrent, letterH, instTime, startLag,
retellLength, keylist] = magicNumbers(ROLE, TRIALTYPE, WHICHSEED)
# networkInit
networkInit(STUN, NAT, portIn, portOut, punchTimeout)
# open files we will use for writing stuff out
fIn1 = open(savefileIn1, 'wb')
fIn2 = open(savefileIn2, 'wb')
fOut = open(savefileOut, 'wb')
fLog = open(savefileLog, 'w') # text file
fTTL = open(savefileTTL, 'w') # text file
startTTLTime = time.time()
fTTL.write('TTL logging startTime: '+str(startTTLTime)+'\n\n')
# audio I/O processes and TTL recording run in separate processes
queueInput = multiprocessing.Queue()
queueOutput = multiprocessing.Queue()
queueTTL = multiprocessing.Queue()
audioInput = multiprocessing.Process(name='audioInput',
target=inputProcess,
args=(FORMAT,
CHANNELS,
RATE,
CHUNK,
queueInput,))
audioOutput = multiprocessing.Process(name='audioOutput',
target=outputProcess,
args=(BUFFER,
CHUNK,
FORMAT,
CHANNELS,
RATE,
queueOutput,))
audioInput.start()
audioOutput.start()
# if LOGTTL, start serial listening process
if LOGTTL:
serialTTL = multiprocessing.Process(name='serialTTL',
target=serialLog,
args=(serialSettings,
queueTTL,))
serialTTL.start()
else:
serialTTL = False
#%% Crazy & Stupid part: importing late, because psychopy breaks stuff on
# mac otherwise, if imported before multiprocesses have started
from psychopy import core, visual, event
#%% Start Visual part
# set up window: black, fullscreen, try a common size for projectors
try:
win = visual.Window([1366, 768],
color='black',
monitor='testMonitor',
fullscr=True)
except:
print('\nProblem while setting up window')
sys.exit()
# Audio check screen (waiting)
waitText = 'Check audio, press s to start'
waitInst = visual.TextStim(win,
waitText,
color='white',
height=letterH)
waitInst.draw()
win.flip()
# to capture keypress event times correctly, we start a clock and get event
# times relative to that
keyClock = core.Clock()
# start time of clock in unix time
keyClockStart = keyClock.getLastResetTime()
# wait for key presses (INCLUDING TTL)
# startSyncStamp = time.time()
# while True and (time.time()-startSyncStamp < 4):
while True:
core.wait(0.01)
# capture key presses, timing is relative to keyClock
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if 's' was pressed, go on
if keys[0][0] == 's':
break
# if event.getKeys returns a '5' or 'ó', its a TTL
elif keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
EscCleanup(queueInput, queueOutput, queueTTL, audioInput,
audioOutput, serialTTL, fLog, fTTL, win)
return
#%% Synch computers
# Synch process: (1) handshake to start, (2) exchange time stamps,
# derive common start time (average of time stamps + startLag)
commFlag = True
incoming = []
# first a handshake for synch
print('\nStarting synch\n')
while commFlag:
core.wait(0.01)
# capture keys (TTL)
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if event.getKeys returns a '5' or 'ó', its a TTL
if keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
EscCleanup(queueInput, queueOutput, queueTTL, audioInput,
audioOutput, serialTTL, fLog, fTTL, win)
return
# send packets
try:
socketComm.sendto('syncTimeNow'.encode(), (IP, PortComm))
except:
print('\nProblem sending a syncTimeNow packet...\n')
pass
try:
incoming = socketComm.recv(CHUNK)
except:
pass
if incoming == 'syncTimeNow'.encode():
incoming = []
# time stamp on our side
timeHere = time.time()
print('\nReceived synch handshake, sending timeHere',
str(timeHere), '\n')
while True:
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if event.getKeys returns a '5' or 'ó', its a TTL
if keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
EscCleanup(queueInput, queueOutput, queueTTL,
audioInput, audioOutput, serialTTL,
fLog, fTTL, win)
return
# send our time stamp
for i in range(2):
try:
socketComm.sendto(struct.pack('<d', timeHere),
(IP, PortComm))
except:
print('\nProblem sending a timeHere packet...\n')
pass
# read out socket
try:
incoming = socketComm.recv(CHUNK)
except:
pass
# if read out data is what we would expect, create startTime
if bool(incoming) & bool(len(incoming) == 8):
print('\nGot incoming time\n')
# unpack time stamp from other side
timeThere = struct.unpack('<d', incoming)[0]
print('\nIncoming timeThere is',
str(timeThere), '\n')
# start is at the max of the two timestamps
# + a predefined lag
startTimeCommon = max(timeThere, timeHere) + startLag
print('\nGot shared startTimeCommon:',
str(startTimeCommon), '\n')
commFlag = False
# insurance policy - send it last time
for i in range(2):
socketComm.sendto(struct.pack('<d', timeHere),
(IP, PortComm))
break
# Put up a synch screen while we are waiting for startTimeCommon
synchText = 'Synching start with other site...'
synchStim = visual.TextStim(win,
synchText,
color='white',
height=letterH)
synchStim.draw()
win.flip()
# log startTimeCommon
fLog.write('startTimeCommon: ' + str(startTimeCommon) + '\n')
# common start is synched at a precision of
# keyboard polling (few ms) + ntp diff + hardware jitter
while time.time() < startTimeCommon:
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if event.getKeys returns a '5' or 'ó', its a TTL
if keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
EscCleanup(queueInput, queueOutput, queueTTL, audioInput,
audioOutput, serialTTL, fLog, fTTL, win)
return
# log audio file object positions at start
fLog.write('Audio file positions at startTimeCommon:\n')
fLog.write('fIn1: ' + str(fIn1.tell()) + '\n')
fLog.write('fIn2: ' + str(fIn2.tell()) + '\n')
fLog.write('fOut: ' + str(fOut.tell()) + '\n')
#%% Instructions
# Instructions: basic text instructions, two pages
instructions1 = visual.TextStim(win,
instText1,
color='white',
height=letterH)
instructions2 = visual.TextStim(win,
instText2,
pos=[0, 0.1],
color='white',
height=letterH)
# draw, flip and give some time (instTime) for reading
instructions1.draw()
win.flip()
# capture key presses /TTLs
while (time.time()-startTimeCommon) < instTime:
core.wait(0.01)
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if event.getKeys returns a '5' or 'ó', its a TTL
if keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
EscCleanup(queueInput, queueOutput, queueTTL, audioInput,
audioOutput, serialTTL, fLog, fTTL, win)
return
instructions2.draw()
win.flip()
# escape jumps instructions
while (time.time()-startTimeCommon) < (instTime + instTime/5):
core.wait(0.01)
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if event.getKeys returns a '5' or 'ó', its a TTL
if keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
EscCleanup(queueInput, queueOutput, queueTTL, audioInput,
audioOutput, serialTTL, fLog, fTTL, win)
return
# countback before start, topic
for seconds in reversed(range(5)):
# put up the correct number on display
instructions3 = visual.TextStim(win,
instText3+str(seconds+1),
pos=[0, -0.1],
color='white',
height=letterH)
instructions2.draw() # keep on display the last instruction as well
instructions3.draw()
win.flip()
startCountbackFlip = time.time()
# capture keypresses / TTLs while waiting for next countback second
while time.time()-startCountbackFlip < 1:
keys = event.getKeys(keylist,
timeStamped=keyClock)
if keys:
# if event.getKeys returns a '5' or 'ó', its a TTL
if keys[0][0] == '5' or keys[0][0] == 'ó':
fTTL.write(str(keys[0][1] + keyClockStart)+'\n')
# escape quits
elif keys[0][0] == 'escape':
| |
unlock 54423720 0q82_05
"""
# print(
# step + " " +
# str(word.txt.encode('ascii', 'ignore')).replace(" ", "_") + " " +
# str(LexSentence.inner) + " " +
# str(LexSentence.outer) + " " +
# ("LOCKED " if self._global_lock.locked() else "unlock ") +
# str(id(self._global_lock)) + " " +
# self.max_idn().qstring() +
# "\n", end=""
# )
try:
LexSentence.outer += 1
droid("INSERT_A")
with self._lock_next_word(): # test_word.Word0080Threading.LexManipulated.cop1
try:
LexSentence.inner += 1
droid("INSERT_B")
self._start_transaction() # test_word.Word0080Threading.LexManipulated.cop2
idn_of_new_word = self.next_idn()
self._critical_moment_1() # test_word.Word0080Threading.LexManipulated.cop3
word.set_idn_if_you_really_have_to(idn_of_new_word)
self._critical_moment_2()
self.insert_word(word)
finally:
droid("INSERT_C")
LexSentence.inner -= 1
# TODO: Unit test this lock, with "simultaneous" inserts on multiple threads.
finally:
droid("INSERT_D")
LexSentence.outer -= 1
    def _critical_moment_1(self):
        """For testing, hold up this step to raise a duplicate IDN error.

        No-op hook called between allocating the next idn and assigning it
        to the word; tests override it to pause there."""
    def _critical_moment_2(self):
        """For testing, hold up this step to raise a duplicate IDN error.

        No-op hook called between assigning the idn and inserting the word;
        tests override it to pause there."""
    def _start_transaction(self):
        """Whatever needs to happen just before getting the next idn. Do nothing by default.

        Subclasses backed by a real store override this hook."""
    def next_idn(self):
        """Return the idn for the next word: one past the current maximum."""
        return self.max_idn().inc()  # Crude reinvention of AUTO_INCREMENT
    def max_idn(self):
        """Return the largest idn currently in the lex. Subclasses must implement."""
        raise NotImplementedError()
    def server_version(self):
        """Version string of the backing store; placeholder in the base class."""
        return "(not implemented)"
    def disconnect(self):
        """Release the connection to the backing store. Subclasses must implement."""
        raise NotImplementedError()
def find_last(self, **kwargs):
# TODO: In LexMySQL, do this more efficiently:
# limit find_words() to latest using sql LIMIT.
# TODO: Who should make sure idn_ascending is True?
bunch = self.find_words(**kwargs)
try:
return bunch[-1]
except IndexError:
raise self.NotFound
# def read_word(self, txt_or_idn_etc):
# if Text.is_valid(txt_or_idn_etc):
# # word = self.word_class(txt=txt_or_idn_etc) <-- well that was dumb ... OR NOT
# word = self.word_class(txt_or_idn_etc)
# # word = self[txt_or_idn_etc] <--- haha no, that is infinite recursion
# # word._from_definition(txt_or_idn_etc)
# # FIXME: Should verify that lex defined it.
# # Maybe eventually the user can define it himself.
# # word = self.lex.find_words(sbj=self.lex, vrb='define', txt=txt_or_idn_etc)
#
# # self.populate_word_from_definition(word, txt_or_idn_etc) # TODO: redundant??
# return word
# else:
# return super(LexSentence, self).read_word(txt_or_idn_etc)
def read_word(self, txt_or_idn_etc):
    """Make a word from a txt definition; defer to the base class for idns etc."""
    if not Text.is_valid(txt_or_idn_etc):
        return super(LexSentence, self).read_word(txt_or_idn_etc)
    return self.word_class(txt_or_idn_etc)
class CreateWordError(ValueError):
    """Raised by LexSentence.create_word() for bad argument types or combinations."""
def create_word(
    self,
    sbj,
    vrb,
    obj,
    num=None,
    txt=None,
    num_add=None,
    use_already=False,
    override_idn=None
):
    """
    Construct a new sentence from a 3-word subject-verb-object.

    sbj, vrb, obj -- each a Word, a Number idn, or a txt string.
    num, txt      -- sentence payload; default 1 and '' respectively.
    num_add       -- mutually exclusive with num: add this to the num of the
                     latest (sbj,vrb,obj) sentence, or use it as num if none.
    use_already   -- if the latest (sbj,vrb,obj) sentence already has this
                     num and txt, reuse it instead of saving a duplicate.
    override_idn  -- force a specific idn on save (passed to Word.save()).

    Returns the new (or reused) word.  Raises CreateWordError on bad types
    or when both num and num_add are given.
    """
    # TODO: Disallow num,txt positionally, unlike Word.says()
    # TODO: Allow sbj=lex
    assert isinstance(sbj, (Word, Number, type(''))), "sbj cannot be a " + type_name(sbj)
    assert isinstance(vrb, (Word, Number, type(''))), "vrb cannot be a " + type_name(vrb)
    assert isinstance(obj, (Word, Number, type(''))), "obj cannot be a " + type_name(obj)
    # if isinstance(txt, numbers.Number) or Text.is_valid(num):
    #     # TODO: Why `or` not `and`?
    #     (txt, num) = (num, txt)
    if num is not None and num_add is not None:
        raise self.CreateWordError(
            "{self_type}.create_word() cannot specify both num and num_add.".format(
                self_type=type_name(self),
            )
        )
    num = num if num is not None else 1
    txt = txt if txt is not None else ''
    if not Number.is_number(num):
        # TODO: Allow q-strings for num. I.e. raise this exception on Number(num) error.
        raise self.CreateWordError(
            "Wrong type for {self_type}.create_word(num={num_type})".format(
                self_type=type_name(self),
                num_type=type_name(num),
            )
        )
    if not Text.is_valid(txt):
        raise self.CreateWordError(
            "Wrong type for {self_type}.create_word(txt={txt_type})".format(
                self_type=type_name(self),
                txt_type=type_name(txt),
            )
        )
    new_word = self.word_class(
        sbj=sbj,
        vrb=vrb,
        obj=obj,
        num=Number(num),
        txt=txt,
    )
    if num_add is not None:
        assert Number.is_number(num_add)
        # Fetch the latest matching sentence (if any) so we can add to its num.
        self.populate_word_from_sbj_vrb_obj(new_word, sbj, vrb, obj)
        if new_word.exists():
            # noinspection PyProtectedMember
            new_word._fields['num'] += Number(num_add)
            # Clear the idn so save() assigns a fresh one for the new sentence.
            new_word.set_idn_if_you_really_have_to(Number.NAN)
        else:
            # noinspection PyProtectedMember
            new_word._fields['num'] = Number(num_add)
        new_word.save()
    elif use_already:
        old_word = self.word_class(
            sbj=sbj,
            vrb=vrb,
            obj=obj
        )
        self.populate_word_from_sbj_vrb_obj(old_word, sbj, vrb, obj)
        if not old_word.exists():
            new_word.save()
        elif (
            old_word.txt == new_word.txt and
            old_word.num == new_word.num
        ):
            # NOTE: There was an identical sentence already (same s,v,o,t,n).
            #       (And it was the latest word matching (s,v,o).)
            #       Fetch it so new_word.exists().
            #       This is the only path through create_word()
            #       where no new sentence is created.
            #       That is, where new_word is an old word.
            # NOTE: It only happens when the old_word is the NEWEST of its kind (s,v,o)
            #       This was a problem with multiple explanations on a word.
            self.populate_word_from_sbj_vrb_obj_num_txt(
                new_word,
                sbj,
                vrb,
                obj,
                Number(num),
                txt
            )
            assert new_word.idn == old_word.idn, "Race condition {old} to {new}".format(
                old=old_word.idn.qstring(),
                new=new_word.idn.qstring()
            )
        else:
            new_word.save()
    else:
        new_word.save(override_idn=override_idn)
    return new_word
@classmethod
def now_number(cls):
    """
    Return a qiki.Number suitable for the whn field: seconds since 1970 UTC.

    Not to be confused with qiki.TimeLex.now_word(), which is a qiki.Word --
    an abstraction representing the current time.
    """
    return TimeLex().now_word().num
def native_num(num):
    """Convert a qiki Number into a native Python value suitable for JSON."""
    if num.is_suffixed():
        # TODO: Complex?
        return num.qstring()
    if not num.is_reasonable():
        # JSON has no NaN/inf, so map unreasonable values to null (None).
        # THANKS: JSON is a dummy about NaN, inf,
        #         https://stackoverflow.com/q/1423081/673991#comment52764219_1424034
        # THANKS: None to nul, https://docs.python.org/library/json.html#py-to-json-table
        return None
    if num.is_whole():
        return int(num)
    # TODO: Ludicrous numbers should become int.
    return float(num)
# import json
#
#
# class WordEncoder(json.JSONEncoder):
# def default(self, o):
# if isinstance(o, Word):
# return repr(o)
# else:
# return super(WordEncoder, self).default(o)
class LexInMemory(LexSentence):
    """In-memory lex backed by a plain Python list of words. Always starts empty."""
def __init__(self, **kwargs):
    """Create an empty in-memory lex and seed it with the seminal words."""
    super(LexInMemory, self).__init__(**kwargs)
    # TODO: new_lex_memory = LexMemory(old_lex_memory)?
    self.words = None   # list of words indexed by int(idn); filled by install_from_scratch()
    self.install_from_scratch()
def insert_word(self, word):
    """Append a word to the store, stamping its whn with the current time."""
    assert not word.idn.is_nan()
    word.whn = self.now_number()
    self.words.append(word)
    # The new word's idn must equal its list index (dense idns 0,1,2,...).
    assert int(word.idn) == len(self.words) - 1
    # NOTE: Crude expectation word insertion order 0,1,2,...
    # noinspection PyProtectedMember
    word._now_it_exists()
def disconnect(self):
    """Nothing to release for the in-memory store."""
    pass
def install_from_scratch(self):
    """Reset the store and create the seminal words (lex, define, noun, verb)."""
    self.words = []
    # NOTE: Assume zero-starting idns
    self._lex = self.word_class(self.IDN_LEX)
    self._install_all_seminal_words()
    # Re-point the cached words at the stored instances just created.
    self._lex = self.words[int(self.IDN_LEX)]
    self._noun = self.words[int(self.IDN_NOUN)]
    self._verb = self.words[int(self.IDN_VERB)]
    self._define = self.words[int(self.IDN_DEFINE)]
def uninstall_to_scratch(self):
    """Drop the word store entirely (max_idn() tolerates the missing attribute)."""
    del self.words
def populate_word_from_idn(self, word, idn):
    """Fill in `word` from the stored word at `idn`. Return True on success."""
    try:
        index = int(idn)
    except ValueError:   # e.g. Word(Number.NAN)
        return False
    if not 0 <= index < len(self.words):
        # NOTE: Negative indexes are valid Python but not valid idns,
        #       so range-check instead of trusting list indexing.
        return False
    word.populate_from_word(self.words[index])
    return True
def populate_word_from_definition(self, word, define_txt):
    """Flesh out a word by its txt. Only sbj=lex, vrb=define words qualify."""
    wanted_txt = Text(define_txt)
    for candidate in self.words:
        is_definition = (
            candidate.sbj.idn == self.IDN_LEX and
            candidate.vrb.idn == self.IDN_DEFINE
        )
        if is_definition and candidate.txt == wanted_txt:
            word.populate_from_word(candidate)
            return True
    return False
def populate_word_from_sbj_vrb_obj(self, word, sbj, vrb, obj):
    """Fill `word` from the LATEST stored word matching (sbj, vrb, obj)."""
    latest_match = next(
        (w for w in reversed(self.words)
         if w.sbj == sbj and w.vrb == vrb and w.obj == obj),
        None
    )
    if latest_match is None:
        return False
    word.populate_from_word(latest_match)
    return True
def populate_word_from_sbj_vrb_obj_num_txt(self, word, sbj, vrb, obj, num, txt):
    """Fill `word` from the LATEST stored word matching all five fields."""
    latest_match = next(
        (w for w in reversed(self.words)
         if w.sbj == sbj and
            w.vrb == vrb and
            w.obj == obj and
            w.num == num and
            w.txt == txt),
        None
    )
    if latest_match is None:
        return False
    word.populate_from_word(latest_match)
    return True
def max_idn(self):
    """Highest idn in use, or Number(0) when the lex is empty or uninstalled."""
    try:
        last_word = self.words[-1]
    except (AttributeError, IndexError):   # no .words attribute, or empty list
        return Number(0)
    else:
        return last_word.idn
def find_words(
    self,
    idn=None,
    sbj=None,
    vrb=None,
    obj=None,
    txt=None,
    # TODO: num
    idn_ascending=True,
    jbo_ascending=True,
    jbo_vrb=(),
    jbo_strictly=False,
    debug=False
):
    """
    Select stored words matching the given field filters (None means "any").

    idn/sbj/vrb/obj may each be a word, an idn, or a nested collection
    thereof (see word_match()); txt is compared via txt_match().
    jbo_vrb: for each found word w, also collect as w.jbo the words whose
    obj matches w and whose vrb is in jbo_vrb.
    jbo_strictly: drop found words whose jbo list turns out empty.
    Returns a list of copies of the matching words.

    NOTE: jbo_ascending and debug are currently unused here.
    """
    found_words = []
    for word_source in self.words if idn_ascending else reversed(self.words):
        hit = True
        if idn is not None and not self.word_match(word_source.idn, idn):
            # was word_source.idn != self.idn_ify(idn):
            # TODO: Why does word_match(word_source.idn, idn) fail in one test?
            hit = False
        if sbj is not None and not self.word_match(word_source.sbj, sbj):
            hit = False
        if vrb is not None and not self.word_match(word_source.vrb, vrb):
            hit = False
        if obj is not None and not self.word_match(word_source.obj, obj):
            hit = False
        if txt is not None and not self.txt_match(word_source.txt, txt):
            # was word_source.txt != Text(txt):
            hit = False
        if hit:
            found_words.append(self[word_source])   # copy constructor
    if jbo_vrb:
        restricted_found_words = []
        for found_word in found_words:
            jbo = []
            for other_word in self.words:
                if (
                    self.word_match(other_word.obj, found_word.idn) and
                    self.word_match(other_word.vrb, jbo_vrb)
                ):
                    jbo.append(other_word)
            new_word = self[found_word]
            assert new_word is not found_word
            new_word.jbo = jbo
            # FIXME: Whoa this could add a jbo to the in-memory lex object couldn't it!
            #        Same bug exists with LexMySQL instance maybe!
            #        Maybe this is a reason NOT to enforce a lex being a singleton.
            #        Or if this bug does NOT happen
            #        it blows a hole in the idea lex ever was a singleton.
            #        I don't see where Word._from_word() enforces that.
            # TODO: Test whether lex[lex] is lex -- Oh it is in test_08_lex_square_lex
            if jbo or not jbo_strictly:
                restricted_found_words.append(new_word)
        return restricted_found_words
    else:
        return found_words
def word_match(self, word_1, word_or_words_2):
"""
Is a word equal to another word (or any of a nested collection of words)?
Actually they can be idns too.
"""
assert not is_iterable(word_1)
if is_iterable(word_or_words_2):
for word_2 in word_or_words_2:
if | |
"""
SALTS XBMC Addon
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from StringIO import StringIO
import gzip
import datetime
import _strptime # @UnusedImport
import time
import re
import os
import urllib2
import urllib
import hashlib
import xml.etree.ElementTree as ET
import htmlentitydefs
import json
import log_utils
import utils
import xbmc
import xbmcaddon
import xbmcvfs
import kodi
import pyaes
from constants import * # @UnusedWildImport
from salts_lib import strings
logger = log_utils.Logger.get_logger()
# Available skin themes; the index must line up with the 'theme' addon setting.
THEME_LIST = ['Shine', 'Luna_Blue', 'Iconic', 'Simple', 'SALTy', 'SALTy (Blended)', 'SALTy (Blue)', 'SALTy (Frog)', 'SALTy (Green)',
              'SALTy (Macaw)', 'SALTier (Green)', 'SALTier (Orange)', 'SALTier (Red)', 'IGDB', 'Simply Elegant', 'IGDB Redux', 'NaCl']
THEME = THEME_LIST[int(kodi.get_setting('theme') or 0)]
if kodi.has_addon('script.salts.themepak'):
    # NOTE(review): the check is for 'script.salts.themepak' but the path is
    # taken from 'plugin.video.saltsrd.lite' -- looks inconsistent; confirm intended.
    themepak_path = xbmcaddon.Addon('plugin.video.saltsrd.lite').getAddonInfo('path')
else:
    themepak_path = kodi.get_path()
THEME_PATH = os.path.join(themepak_path, 'art', 'themes', THEME)
translations = kodi.Translations(strings.STRINGS)
# (field, direction-sign) pairs for the six configurable source-sort levels.
SORT_FIELDS = [
    (SORT_LIST[int(kodi.get_setting('sort1_field'))], SORT_SIGNS[kodi.get_setting('sort1_order')]),
    (SORT_LIST[int(kodi.get_setting('sort2_field'))], SORT_SIGNS[kodi.get_setting('sort2_order')]),
    (SORT_LIST[int(kodi.get_setting('sort3_field'))], SORT_SIGNS[kodi.get_setting('sort3_order')]),
    (SORT_LIST[int(kodi.get_setting('sort4_field'))], SORT_SIGNS[kodi.get_setting('sort4_order')]),
    (SORT_LIST[int(kodi.get_setting('sort5_field'))], SORT_SIGNS[kodi.get_setting('sort5_order')]),
    (SORT_LIST[int(kodi.get_setting('sort6_field'))], SORT_SIGNS[kodi.get_setting('sort6_order')])]
def art(name):
    """Resolve a theme image name to a full path, with fallbacks for missing files."""
    art_path = os.path.join(THEME_PATH, name)
    if xbmcvfs.exists(art_path):
        return art_path
    if name == 'fanart.jpg':
        # No themed fanart: fall back to the add-on's own fanart.
        return os.path.join(kodi.get_path(), name)
    # Some themes ship .jpg where a .png was requested.
    return art_path.replace('.png', '.jpg')
def show_id(show):
    """Build query params identifying a show by its best available id.

    Id types are tried in a fixed preference order; the first one with a
    truthy value wins. Returns {} when no usable id is present.
    """
    for id_type in ('trakt', 'imdb', 'tvdb', 'tmdb', 'tvrage', 'slug'):
        id_value = show['ids'].get(id_type)
        if id_value:
            return {'id_type': id_type, 'show_id': id_value}
    return {}
def title_key(title):
    """Return the title minus any leading English article, for sorting."""
    if title is None:
        title = ''
    upper_title = title.upper()
    # 'AN ' must be tested before 'A ' would never match it anyway ('AN'[1] != ' '),
    # but keep the longest-prefix-first order for clarity.
    for article in ('THE ', 'AN ', 'A '):
        if upper_title.startswith(article):
            return title[len(article):]
    return title
def _released_key(item):
if 'released' in item:
return item['released']
elif 'first_aired' in item:
return item['first_aired']
else:
return 0
def sort_list(sort_key, sort_direction, list_data):
    """Sort a trakt list's items by one of the TRAKT_LIST_SORT keys.

    Unknown sort keys are logged and the list is returned unsorted.
    """
    logger.log('Sorting List: %s - %s' % (sort_key, sort_direction), log_utils.LOGDEBUG)
    # logger.log(json.dumps(list_data), log_utils.LOGDEBUG)
    reverse = False if sort_direction == TRAKT_SORT_DIR.ASCENDING else True
    if sort_key == TRAKT_LIST_SORT.RANK:
        return sorted(list_data, key=lambda x: x['rank'], reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.RECENTLY_ADDED:
        return sorted(list_data, key=lambda x: x['listed_at'], reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.TITLE:
        # x['type'] names the sub-dict ('movie'/'show') that carries the metadata.
        return sorted(list_data, key=lambda x: title_key(x[x['type']].get('title')), reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.RELEASE_DATE:
        return sorted(list_data, key=lambda x: _released_key(x[x['type']]), reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.RUNTIME:
        return sorted(list_data, key=lambda x: x[x['type']].get('runtime', 0), reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.POPULARITY:
        return sorted(list_data, key=lambda x: x[x['type']].get('votes', 0), reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.PERCENTAGE:
        return sorted(list_data, key=lambda x: x[x['type']].get('rating', 0), reverse=reverse)
    elif sort_key == TRAKT_LIST_SORT.VOTES:
        return sorted(list_data, key=lambda x: x[x['type']].get('votes', 0), reverse=reverse)
    else:
        logger.log('Unrecognized list sort key: %s - %s' % (sort_key, sort_direction), log_utils.LOGWARNING)
        return list_data
def make_seasons_info(progress):
    """Convert trakt watched-progress data into per-season info labels.

    Returns a dict keyed by str(season number); {} when progress is falsy.
    """
    season_info = {}
    if not progress:
        return season_info
    for season in progress['seasons']:
        info = {}
        has_aired = 'aired' in season
        has_completed = 'completed' in season
        if has_aired:
            info['episode'] = info['TotalEpisodes'] = season['aired']
        if has_completed:
            info['WatchedEpisodes'] = season['completed']
        if has_aired and has_completed:
            info['UnWatchedEpisodes'] = season['aired'] - season['completed']
            # playcount marks fully-watched seasons.
            info['playcount'] = season['aired'] if season['completed'] == season['aired'] else 0
        if 'number' in season:
            info['season'] = season['number']
        season_info[str(season['number'])] = info
    return season_info
def make_episodes_watched(episodes, progress):
    """Annotate each episode dict with a 'watched' flag from trakt progress data."""
    watched_map = {}
    for season in progress['seasons']:
        season_key = str(season['number'])
        watched_map[season_key] = {
            str(ep_status['number']): ep_status['completed']
            for ep_status in season['episodes']
        }
    for episode in episodes:
        season_key = str(episode['season'])
        episode_key = str(episode['number'])
        episode['watched'] = watched_map.get(season_key, {}).get(episode_key, False)
    return episodes
def make_trailer(trailer_url):
    """Turn a youtube watch URL into a Kodi youtube-plugin playback URL (or None)."""
    match = re.search(r'\?v=(.*)', trailer_url)
    if match is None:
        return None
    return 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % (match.group(1))
def make_ids(item):
    """Flatten a trakt 'ids' dict into the id key names SALTS/Kodi expect."""
    # (trakt key, destination keys) -- imdb fans out to three legacy names.
    key_map = [
        ('imdb', ('code', 'imdbnumber', 'imdb_id')),
        ('tmdb', ('tmdb_id',)),
        ('tvdb', ('tvdb_id',)),
        ('trakt', ('trakt_id',)),
        ('slug', ('slug',)),
    ]
    info = {}
    if 'ids' in item:
        ids = item['ids']
        for source_key, dest_keys in key_map:
            if source_key in ids:
                for dest_key in dest_keys:
                    info[dest_key] = ids[source_key]
    return info
def make_people(item):
    """Extract comma-joined director and writer name strings from trakt crew credits."""
    people = {}
    crew = item.get('crew', {})
    if 'directing' in crew:
        director_names = [member['person']['name'] for member in crew['directing']
                          if member['job'].lower() == 'director']
        people['director'] = ', '.join(director_names)
    if 'writing' in crew:
        writer_names = [member['person']['name'] for member in crew['writing']
                        if member['job'].lower() in ('writer', 'screenplay', 'author')]
        people['writer'] = ', '.join(writer_names)
    return people
def make_air_date(first_aired):
    """Format an ISO first-aired timestamp as a local 'YYYY-MM-DD' string."""
    utc_air_time = utils.iso_2_utc(first_aired)
    try: air_date = time.strftime('%Y-%m-%d', time.localtime(utc_air_time))
    except ValueError:   # windows throws a ValueError on negative values to localtime
        d = datetime.datetime.fromtimestamp(0) + datetime.timedelta(seconds=utc_air_time)
        air_date = d.strftime('%Y-%m-%d')
    return air_date
def get_section_params(section):
    """Return the listing parameters (modes, types, labels) for a TV or movie section."""
    if section == SECTIONS.TV:
        return {
            'section': section,
            'next_mode': MODES.SEASONS,
            'folder': True,
            'video_type': VIDEO_TYPES.TVSHOW,
            'content_type': CONTENT_TYPES.TVSHOWS,
            'search_img': 'television_search.png',
            'label_plural': i18n('tv_shows'),
            'label_single': i18n('tv_show'),
        }
    # Anything that is not the TV section is treated as movies.
    return {
        'section': section,
        'next_mode': MODES.GET_SOURCES,
        'folder': False,
        'video_type': VIDEO_TYPES.MOVIE,
        'content_type': CONTENT_TYPES.MOVIES,
        'search_img': 'movies_search.png',
        'label_plural': i18n('movies'),
        'label_single': i18n('movie'),
    }
def filename_from_title(title, video_type, year=None):
    """Build a filesystem-safe filename (template) for a movie or TV show.

    For TV shows the result keeps literal '%s' placeholders (e.g. 'Title S%sE%s')
    so callers can fill in season/episode later; for movies the year is appended
    when given.  Unsafe characters collapse to '.', and reserved Windows device
    names (CON, PRN, ...) are defused.
    """
    if video_type == VIDEO_TYPES.TVSHOW:
        filename = '%s S%sE%s'
        filename = filename % (title, '%s', '%s')
    else:
        if year: title = '%s.%s' % (title, year)
        filename = title
    # Replace anything not filename-safe, but keep the '%s' placeholders intact.
    filename = re.sub(r'(?!%s)[^\w\-_\.]', '.', filename)
    filename = re.sub(r'\.+', '.', filename)
    filename = re.sub(re.compile(r'(CON|PRN|AUX|NUL|COM\d|LPT\d)\.', re.I), '\\1_', filename)
    # BUG FIX: xbmc.makeLegalFilename() returns the sanitized name; the result
    # was previously discarded, so the final platform-specific cleanup never applied.
    filename = xbmc.makeLegalFilename(filename)
    return filename
def filter_exclusions(hosters):
    """Drop hosters whose host name appears in the user's exclusion-list setting."""
    exclusions = kodi.get_setting('excl_list')
    exclusions = exclusions.replace(' ', '')
    exclusions = exclusions.lower()
    if not exclusions: return hosters
    filtered_hosters = []
    for hoster in hosters:
        # NOTE(review): this is a substring test against the whole setting string,
        # so 'abc.com' also matches an entry like 'xabc.com' -- confirm whether an
        # exact per-entry (comma-split) comparison was intended.
        if hoster['host'].lower() in exclusions:
            logger.log('Excluding %s (%s) from %s' % (hoster['url'], hoster['host'], hoster['class'].get_name()), log_utils.LOGDEBUG)
            continue
        filtered_hosters.append(hoster)
    return filtered_hosters
def filter_quality(video_type, hosters):
    """Keep only hosters at or below the user's maximum quality rank for this video type."""
    qual_filter = 5 - int(kodi.get_setting('%s_quality' % video_type))   # subtract to match Q_ORDER
    if qual_filter == 5:
        # Setting of 0 means "no quality filtering at all".
        return hosters
    else:
        return [hoster for hoster in hosters if hoster['quality'] is not None and Q_ORDER[hoster['quality']] <= qual_filter]
def get_sort_key(item):
    """Build a tuple sort key for a source item from the user's SORT_FIELDS config.

    Each configured field contributes one signed int; a field of 'none' ends
    the key early. Unknown values in SORT_KEYS tables sort as worst (-1).
    """
    item_sort_key = []
    for field, sign in SORT_FIELDS:
        if field == 'none':
            break
        elif field in SORT_KEYS:
            if field == 'source':
                value = item['class'].get_name()
            else:
                value = item[field]
            if value in SORT_KEYS[field]:
                item_sort_key.append(sign * int(SORT_KEYS[field][value]))
            else:   # assume all unlisted values sort as worst
                item_sort_key.append(sign * -1)
        elif field == 'debrid':
            # 'debrid' may be absent from an item; missing counts as neutral (0).
            if field in item:
                item_sort_key.append(sign * bool(item[field]))
            else:
                item_sort_key.append(0)
        else:
            if item[field] is None:
                item_sort_key.append(sign * -1)
            else:
                item_sort_key.append(sign * int(item[field]))
    # logger.log('item: %s sort_key: %s' % (item, item_sort_key), log_utils.LOGDEBUG)
    return tuple(item_sort_key)
def make_source_sort_string(sort_key):
    """Serialize a {field: priority} dict into 'a|b|c' ordered by descending priority."""
    fields_by_priority = sorted(sort_key, key=lambda field: sort_key[field], reverse=True)
    return '|'.join(fields_by_priority)
def test_stream(hoster):
    """Probe a hoster URL with a short HTTP request; True when it looks playable.

    The part of the URL after '|' is treated as '&'-separated request headers
    (Kodi convention).  Any HTTP status < 400 -- or an 'unknown url type'
    error, which Kodi may still handle -- counts as success.
    """
    # parse_qsl doesn't work because it splits elements by ';' which can be in a non-quoted UA
    try:
        headers = dict([item.split('=') for item in (hoster['url'].split('|')[1]).split('&')])
        for key in headers: headers[key] = urllib.unquote_plus(headers[key])
    except:
        # No (or malformed) header suffix: probe without extra headers.
        headers = {}
    logger.log('Testing Stream: %s from %s using Headers: %s' % (hoster['url'], hoster['class'].get_name(), headers), log_utils.LOGDEBUG)
    request = urllib2.Request(hoster['url'].split('|')[0], headers=headers)
    msg = ''
    # Follow redirects so a 30x doesn't count as a failure.
    opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
    urllib2.install_opener(opener)
    try: http_code = urllib2.urlopen(request, timeout=2).getcode()
    except urllib2.URLError as e:
        # treat an unhandled url type as success
        if hasattr(e, 'reason') and 'unknown url type' in str(e.reason).lower():
            return True
        else:
            if isinstance(e, urllib2.HTTPError):
                http_code = e.code
            else:
                # 600: synthetic code for non-HTTP URL errors.
                http_code = 600
            msg = str(e)
    except Exception as e:
        if 'unknown url type' in str(e).lower():
            return True
        else:
            logger.log('Exception during test_stream: (%s) %s' % (type(e).__name__, e), log_utils.LOGDEBUG)
            # 601: synthetic code for unexpected exceptions.
            http_code = 601
            msg = str(e)
    if int(http_code) >= 400:
        logger.log('Test Stream Failed: Url: %s HTTP Code: %s Msg: %s' % (hoster['url'], http_code, msg), log_utils.LOGDEBUG)
    return int(http_code) < 400
def scraper_enabled(name):
    """True when the scraper's enable setting is 'true' or missing (defaults on)."""
    # return true if setting exists and set to true, or setting doesn't exist (i.e. '')
    return kodi.get_setting('%s-enable' % (name)) in ('true', '')
def make_day(date, use_words=True):
    """Format a 'YYYY-MM-DD' date for display.

    Nearby dates become short words (YDA/TDA/TOM) or a weekday abbreviation
    when use_words is set; otherwise the user's configured date format is
    used, falling back to ISO when that format is invalid.
    """
    date = to_datetime(date, '%Y-%m-%d').date()
    today = datetime.date.today()
    day_diff = (date - today).days
    date_format = kodi.get_setting('date_format')
    fallback_format = '%Y-%m-%d'
    try: day = date.strftime(date_format)
    except ValueError: day = date.strftime(fallback_format)
    if use_words:
        if day_diff == -1:
            day = 'YDA'
        elif day_diff == 0:
            day = 'TDA'
        elif day_diff == 1:
            day = 'TOM'
        elif day_diff > 1 and day_diff < 7:
            # Within the coming week: show the weekday abbreviation.
            day = date.strftime('%a')
    return day
def make_time(utc_ts, setting):
    """Format a UTC timestamp as local clock time; 24h or 12h per the given setting."""
    local_time = time.localtime(utc_ts)
    if kodi.get_setting(setting) == '1':
        # 24-hour clock.
        time_format = '%H:%M'
        time_str = time.strftime(time_format, local_time)
    else:
        # 12-hour clock; drop ':00' on the hour, and any leading zero.
        time_format = '%I%p' if local_time.tm_min == 0 else '%I:%M%p'
        time_str = time.strftime(time_format, local_time)
        if time_str[0] == '0': time_str = time_str[1:]
    return time_str
def to_datetime(dt_str, date_format):
# strptime mysteriously fails sometimes with TypeError; this is a hacky workaround
# note, they aren't 100% equal as time.strptime loses fractional seconds but they | |
units, lines = None, follow = False):
result = 0
for unit in self.sortedAfter(units):
exitcode = self.log_unit(unit, lines, follow)
if exitcode < 0:
return exitcode
if exitcode > result:
result = exitcode
return result
def log_unit(self, unit, lines = None, follow = False):
    """Show the journal log of one unit by name; -1 when the unit can't be loaded."""
    conf = self.load_unit_conf(unit)
    if not conf:
        return -1
    return self.log_unit_from(conf, lines, follow)
def log_unit_from(self, conf, lines = None, follow = False):
    """Display (or follow) the journal log file of an already-loaded unit conf."""
    log_path = self.get_journal_log_from(conf)
    if follow:
        cmd = [ TAIL_CMD, "-n", str(lines or 10), "-F", log_path ]
    elif lines:
        cmd = [ TAIL_CMD, "-n", str(lines or 10), log_path ]
    elif _no_pager:
        cmd = [ CAT_CMD, log_path ]
    else:
        cmd = [ LESS_CMD, log_path ]
    logg.debug("journalctl %s -> %s", conf.name(), cmd)
    return os.spawnvp(os.P_WAIT, cmd[0], cmd) # type: ignore
def get_journal_log_from(self, conf):
    """Return the unit's journal log path, rooted under self._root if set."""
    return os_path(self._root, self.get_journal_log(conf))
def get_journal_log(self, conf):
    """ /var/log/zzz.service.log or /var/log/default.unit.log """
    filename = os.path.basename(strE(conf.filename()))
    unitname = (conf.name() or "default") + ".unit"
    base_name = filename or unitname
    log_folder = expand_path(self._journal_log_folder, conf.root_mode())
    log_file = base_name.replace(os.path.sep, ".") + ".log"
    if log_file.startswith("."):
        # avoid hidden files: give dotted names a visible prefix
        log_file = "dot." + log_file
    return os.path.join(log_folder, log_file)
def open_journal_log(self, conf):
    """Open the unit's journal log file for appending, creating its folder if needed."""
    log_file = self.get_journal_log_from(conf)
    log_folder = os.path.dirname(log_file)
    if not os.path.isdir(log_folder):
        os.makedirs(log_folder)
    return open(log_file, "a")
def get_WorkingDirectory(self, conf):
    """Return the [Service] WorkingDirectory= value ('' when unset)."""
    return conf.get("Service", "WorkingDirectory", "")
def chdir_workingdir(self, conf):
    """ if specified then change the working directory

    Return contract (odd, but relied upon by callers):
      False -> chdir succeeded;
      None  -> no WorkingDirectory set, or the failure was ignorable
               ('-' prefix, systemd convention);
      path string (truthy) -> hard chdir failure.
    """
    # the original systemd will start in '/' even if User= is given
    if self._root:
        os.chdir(self._root)
    workingdir = self.get_WorkingDirectory(conf)
    if workingdir:
        ignore = False
        if workingdir.startswith("-"):
            # leading '-' means: ignore chdir failures
            workingdir = workingdir[1:]
            ignore = True
        into = os_path(self._root, self.expand_special(workingdir, conf))
        try:
            logg.debug("chdir workingdir '%s'", into)
            os.chdir(into)
            return False
        except Exception as e:
            if not ignore:
                logg.error("chdir workingdir '%s': %s", into, e)
                return into
            else:
                logg.debug("chdir workingdir '%s': %s", into, e)
                return None
    return None
# (socket, socketfile): a bound unix datagram socket plus its filesystem path,
# used to emulate sd_notify's $NOTIFY_SOCKET for Type=notify services.
NotifySocket = collections.namedtuple("NotifySocket", ["socket", "socketfile" ])
def get_notify_socket_from(self, conf, socketfile = None, debug = False):
    """ creates a notify-socket path for the (non-privileged) user

    Unix socket paths are limited to ~108 chars, so progressively shorter
    candidate paths are tried until one fits.
    """
    notify_socket_folder = expand_path(_notify_socket_folder, conf.root_mode())
    notify_folder = os_path(self._root, notify_socket_folder)
    notify_name = "notify." + str(conf.name() or "systemctl")
    notify_socket = os.path.join(notify_folder, notify_name)
    socketfile = socketfile or notify_socket
    if len(socketfile) > 100:
        # occurs during testsuite.py for ~user/test.tmp/root path
        if debug:
            logg.debug("https://unix.stackexchange.com/questions/367008/%s",
                       "why-is-socket-path-length-limited-to-a-hundred-chars")
            logg.debug("old notify socketfile (%s) = %s", len(socketfile), socketfile)
        # o44/o77 abbreviate the name to 44/77 chars; fall back step by step,
        # eventually moving into the tmp dir with a per-user prefix.
        notify_name44 = o44(notify_name)
        notify_name77 = o77(notify_name)
        socketfile = os.path.join(notify_folder, notify_name77)
        if len(socketfile) > 100:
            socketfile = os.path.join(notify_folder, notify_name44)
        pref = "zz.%i.%s" % (get_USER_ID(),o22(os.path.basename(notify_socket_folder)))
        if len(socketfile) > 100:
            socketfile = os.path.join(get_TMP(), pref, notify_name)
        if len(socketfile) > 100:
            socketfile = os.path.join(get_TMP(), pref, notify_name77)
        if len(socketfile) > 100: # pragma: no cover
            socketfile = os.path.join(get_TMP(), pref, notify_name44)
        if len(socketfile) > 100: # pragma: no cover
            socketfile = os.path.join(get_TMP(), notify_name44)
        if debug:
            logg.info("new notify socketfile (%s) = %s", len(socketfile), socketfile)
    return socketfile
def notify_socket_from(self, conf, socketfile = None):
    """Create and bind the unix datagram socket used as the unit's $NOTIFY_SOCKET."""
    socketfile = self.get_notify_socket_from(conf, socketfile, debug=True)
    try:
        # remove any stale socket file left over from a previous run
        if not os.path.isdir(os.path.dirname(socketfile)):
            os.makedirs(os.path.dirname(socketfile))
        if os.path.exists(socketfile):
            os.unlink(socketfile)
    except Exception as e:
        logg.warning("error %s: %s", socketfile, e)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.bind(socketfile)
    os.chmod(socketfile, 0o777)   # the service may run under some User= setting
    return Systemctl.NotifySocket(sock, socketfile)
def read_notify_socket(self, notify, timeout):
    """Read one status datagram from the notify socket.

    Returns the decoded text, or '' when the read timed out.
    """
    notify.socket.settimeout(timeout or DefaultMaximumTimeout)
    result = ""
    try:
        result, client_address = notify.socket.recvfrom(4096)
        assert isinstance(result, bytes)
        if result:
            result = result.decode("utf-8")
            result_txt = result.replace("\n","|")
            result_len = len(result)
            logg.debug("read_notify_socket(%s):%s", result_len, result_txt)
    except socket.timeout as e:
        # short polling timeouts are expected; only log the longer ones
        if timeout > 2:
            logg.debug("socket.timeout %s", e)
    return result
def wait_notify_socket(self, notify, timeout, pid = None, pid_file = None):
    """Poll the notify socket until READY=1 (and MAINPID, if expected) or timeout.

    Returns a dict of the NAME=value pairs received; {} when the socket file
    does not exist.  Gives up early when the watched pid dies.
    """
    if not os.path.exists(notify.socketfile):
        logg.info("no $NOTIFY_SOCKET exists")
        return {}
    #
    lapseTimeout = max(3, int(timeout / 100))
    mainpidTimeout = lapseTimeout # Apache sends READY before MAINPID
    status = ""
    logg.info("wait $NOTIFY_SOCKET, timeout %s (lapse %s)", timeout, lapseTimeout)
    waiting = " ---"
    results = {}
    for attempt in xrange(int(timeout)+1):
        if pid and not self.is_active_pid(pid):
            logg.info("seen dead PID %s", pid)
            return results
        if not attempt: # first one
            time.sleep(1) # until TimeoutStartSec
            continue
        result = self.read_notify_socket(notify, 1) # sleep max 1 second
        for line in result.splitlines():
            # for name, value in self.read_env_part(line)
            if "=" not in line:
                continue
            name, value = line.split("=", 1)
            results[name] = value
            if name in ["STATUS", "ACTIVESTATE", "MAINPID", "READY"]:
                hint="seen notify %s " % (waiting)
                logg.debug("%s :%s=%s", hint, name, value)
        if status != results.get("STATUS",""):
            # a changed STATUS resets the grace period for MAINPID
            mainpidTimeout = lapseTimeout
            status = results.get("STATUS", "")
        if "READY" not in results:
            time.sleep(1) # until TimeoutStart
            continue
        if "MAINPID" not in results and not pid_file:
            mainpidTimeout -= 1
            if mainpidTimeout > 0:
                waiting = "%4i" % (-mainpidTimeout)
                time.sleep(1) # until TimeoutStart
                continue
        break # READY and MAINPID
    if "READY" not in results:
        logg.info(".... timeout while waiting for 'READY=1' status on $NOTIFY_SOCKET")
    elif "MAINPID" not in results:
        logg.info(".... seen 'READY=1' but no MAINPID update status on $NOTIFY_SOCKET")
    logg.debug("notify = %s", results)
    try:
        notify.socket.close()
    except Exception as e:
        logg.debug("socket.close %s", e)
    return results
def start_modules(self, *modules):
    """ [UNIT]... -- start these units
    /// SPECIAL: with --now or --init it will
    run the init-loop and stop the units afterwards """
    found_all = True
    selected = []
    for module in modules:
        matched = self.match_units(to_list(module))
        if not matched:
            logg.error("Unit %s not found.", unit_of(module))
            self.error |= NOT_FOUND
            found_all = False
            continue
        # keep matching units in order, without duplicates
        for unit in matched:
            if unit not in selected:
                selected.append(unit)
    want_init = self._now or self._init
    return self.start_units(selected, want_init) and found_all
def start_units(self, units, init = None):
    """ fails if any unit does not start
    /// SPECIAL: may run the init-loop and
    stop the named units afterwards """
    self.wait_system()
    started = []
    all_ok = True
    for unit in self.sortedAfter(units):
        started.append(unit)
        ok = self.start_unit(unit)
        all_ok = all_ok and ok
    if init:
        logg.info("init-loop start")
        sig = self.init_loop_until_stop(started)
        logg.info("init-loop %s", sig)
        # stop in reverse start order
        for unit in reversed(started):
            self.stop_unit(unit)
    return all_ok
def start_unit(self, unit):
    """Start a single unit by name; False when unknown or not valid for --user mode."""
    conf = self.load_unit_conf(unit)
    if conf is None:
        logg.debug("unit could not be loaded (%s)", unit)
        logg.error("Unit %s not found.", unit)
        return False
    if self.not_user_conf(conf):
        logg.error("Unit %s not for --user mode", unit)
        return False
    return self.start_unit_from(conf)
def get_TimeoutStartSec(self, conf):
    """Start timeout in seconds; TimeoutStartSec= overrides TimeoutSec=."""
    timeout = conf.get("Service", "TimeoutSec", strE(DefaultTimeoutStartSec))
    timeout = conf.get("Service", "TimeoutStartSec", timeout)
    return time_to_seconds(timeout, DefaultMaximumTimeout)
def get_SocketTimeoutSec(self, conf):
    """Socket timeout in seconds from [Socket] TimeoutSec=, capped at the maximum."""
    timeout = conf.get("Socket", "TimeoutSec", strE(DefaultTimeoutStartSec))
    return time_to_seconds(timeout, DefaultMaximumTimeout)
def get_RemainAfterExit(self, conf):
    """Boolean [Service] RemainAfterExit= value (default 'no')."""
    return conf.getbool("Service", "RemainAfterExit", "no")
def start_unit_from(self, conf):
    """Start a unit from its loaded conf, under the per-unit wait-lock."""
    if not conf: return False
    # refuse to start units whose files have too many syntax problems
    if self.syntax_check(conf) > 100: return False
    with waitlock(conf):
        logg.debug(" start unit %s => %s", conf.name(), strQ(conf.filename()))
        return self.do_start_unit_from(conf)
def do_start_unit_from(self, conf):
    """Dispatch the start operation by unit type (.service / .socket / .target)."""
    handlers = {
        ".service": self.do_start_service_from,
        ".socket": self.do_start_socket_from,
        ".target": self.do_start_target_from,
    }
    unit_name = conf.name()
    for suffix, handler in handlers.items():
        if unit_name.endswith(suffix):
            return handler(conf)
    logg.error("start not implemented for unit type: %s", unit_name)
    return False
def do_start_service_from(self, conf):
timeout = self.get_TimeoutStartSec(conf)
doRemainAfterExit = self.get_RemainAfterExit(conf)
runs = conf.get("Service", "Type", "simple").lower()
env = self.get_env(conf)
if not self._quiet:
okee = self.exec_check_unit(conf, env, "Service", "Exec") # all...
if not okee and _no_reload: return False
service_directories = self.create_service_directories(conf)
env.update(service_directories) # atleast sshd did check for /run/sshd
# for StopPost on failure:
returncode = 0
service_result = "success"
if True:
if runs in [ "simple", "forking", "notify", "idle" ]:
env["MAINPID"] = strE(self.read_mainpid_from(conf))
for cmd in conf.getlist("Service", "ExecStartPre", []):
exe, newcmd = self.exec_newcmd(cmd, env, conf)
logg.info(" pre-start %s", shell_cmd(newcmd))
forkpid = os.fork()
if not forkpid:
self.execve_from(conf, newcmd, env) # pragma: no cover
run = subprocess_waitpid(forkpid)
logg.debug(" pre-start done (%s) <-%s>",
run.returncode or "OK", run.signal or "")
if run.returncode and exe.check:
logg.error("the ExecStartPre control process exited with error code")
active = "failed"
self.write_status_from(conf, AS=active )
if _what_kind not in ["none", "keep"]:
self.remove_service_directories(conf) # cleanup that /run/sshd
return False
if runs in [ "oneshot" ]:
status_file = self.get_status_file_from(conf)
if self.get_status_from(conf, "ActiveState", "unknown") == "active":
logg.warning("the service was already up once")
return True
for cmd in conf.getlist("Service", "ExecStart", []):
exe, newcmd = self.exec_newcmd(cmd, env, conf)
logg.info("%s start %s", runs, shell_cmd(newcmd))
forkpid = os.fork()
if not forkpid: # pragma: no cover
os.setsid() # detach child process from parent
self.execve_from(conf, newcmd, env)
run = subprocess_waitpid(forkpid)
if run.returncode and exe.check:
returncode = run.returncode
service_result = "failed"
logg.error("%s start %s (%s) <-%s>", runs, service_result,
run.returncode or "OK", run.signal or "")
break
logg.info("%s start done (%s) <-%s>", | |
kwargs['_return_http_data_only'] = True
return self.get_agent_token_with_http_info(owner, uuid, **kwargs) # noqa: E501
    def get_agent_token_with_http_info(self, owner, uuid, **kwargs): # noqa: E501
        """Get agent token # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_agent_token_with_http_info(owner, uuid, async_req=True)
        >>> result = thread.get()
        :param owner: Owner of the namespace (required)
        :type owner: str
        :param uuid: Uuid identifier of the entity (required)
        :type uuid: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of the call frame (self, owner, uuid, kwargs); validated
        # kwargs are merged into it below, so keep this as the first statement.
        local_var_params = locals()
        all_params = [
            'owner',
            'uuid'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument not declared for this endpoint.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_agent_token" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
                                                       local_var_params['owner'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `get_agent_token`") # noqa: E501
        # verify the required parameter 'uuid' is set
        if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
                                                       local_var_params['uuid'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `uuid` when calling `get_agent_token`") # noqa: E501
        collection_formats = {}
        # Path template substitutions for /orgs/{owner}/agents/{uuid}/token.
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner'] # noqa: E501
        if 'uuid' in local_var_params:
            path_params['uuid'] = local_var_params['uuid'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey'] # noqa: E501
        # Maps HTTP status codes to the model used for deserialization.
        response_types_map = {
            200: "V1Token",
            204: "object",
            403: "object",
            404: "object",
        }
        return self.api_client.call_api(
            '/api/v1/orgs/{owner}/agents/{uuid}/token', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def list_agent_names(self, owner, **kwargs): # noqa: E501
"""List agents names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_agent_names(owner, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param offset: Pagination offset.
:type offset: int
:param limit: Limit size.
:type limit: int
:param sort: Sort to order the search.
:type sort: str
:param query: Query filter the search.
:type query: str
:param bookmarks: Filter by bookmarks.
:type bookmarks: bool
:param mode: Mode of the search.
:type mode: str
:param no_page: No pagination.
:type no_page: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1ListAgentsResponse
"""
kwargs['_return_http_data_only'] = True
return self.list_agent_names_with_http_info(owner, **kwargs) # noqa: E501
    def list_agent_names_with_http_info(self, owner, **kwargs): # noqa: E501
        """List agents names # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_agent_names_with_http_info(owner, async_req=True)
        >>> result = thread.get()
        :param owner: Owner of the namespace (required)
        :type owner: str
        :param offset: Pagination offset.
        :type offset: int
        :param limit: Limit size.
        :type limit: int
        :param sort: Sort to order the search.
        :type sort: str
        :param query: Query filter the search.
        :type query: str
        :param bookmarks: Filter by bookmarks.
        :type bookmarks: bool
        :param mode: Mode of the search.
        :type mode: str
        :param no_page: No pagination.
        :type no_page: bool
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(V1ListAgentsResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of the call frame; validated kwargs are merged into it
        # below, so keep this as the first statement of the method.
        local_var_params = locals()
        all_params = [
            'owner',
            'offset',
            'limit',
            'sort',
            'query',
            'bookmarks',
            'mode',
            'no_page'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument not declared for this endpoint.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_agent_names" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
                                                       local_var_params['owner'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `list_agent_names`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner'] # noqa: E501
        # Optional filters are forwarded as query parameters only when set.
        query_params = []
        if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
            query_params.append(('offset', local_var_params['offset'])) # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
            query_params.append(('limit', local_var_params['limit'])) # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
            query_params.append(('sort', local_var_params['sort'])) # noqa: E501
        if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
            query_params.append(('query', local_var_params['query'])) # noqa: E501
        if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
            query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
        if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
            query_params.append(('mode', local_var_params['mode'])) # noqa: E501
        if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
            query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey'] # noqa: E501
        # Maps HTTP status codes to the model used for deserialization.
        response_types_map = {
            200: "V1ListAgentsResponse",
            204: "object",
            403: "object",
            404: "object",
        }
        return self.api_client.call_api(
            '/api/v1/orgs/{owner}/agents/names', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def list_agents(self, owner, **kwargs): # noqa: E501
"""List agents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_agents(owner, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param offset: Pagination offset.
:type offset: int
:param limit: Limit size.
:type limit: int
:param sort: Sort to order the search.
:type sort: str
:param query: Query filter the search.
:type query: str
:param bookmarks: Filter by bookmarks.
:type bookmarks: bool
:param mode: Mode of the search.
:type mode: str
:param no_page: No pagination.
:type no_page: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If | |
<reponame>threefoldtech/js-sdk<gh_stars>10-100
import base64
import copy
import threading
# import collections
import urllib
from gevent import sleep
from jumpscale.clients.base import Client
from jumpscale.core.base import Base, fields
from jumpscale.loader import j
from github.GithubException import UnknownObjectException
from .base import replacelabels
from .helper import retry
from .issue import Issue
from .milestone import RepoMilestone
class GithubRepo:
TYPES = ["story", "ticket", "task", "bug", "feature", "question", "monitor", "unknown"]
PRIORITIES = ["critical", "urgent", "normal", "minor"]
STATES = ["new", "accepted", "question", "inprogress", "verification", "closed"]
    def __init__(self, client, fullname):
        """Wrap a single GitHub repository.

        :param client: authenticated PyGithub client used for all API calls
        :param fullname: repository full name, e.g. ``"owner/repo"``
        """
        self.client = client
        self.fullname = fullname
        self._repoclient = None  # lazy PyGithub Repository object (see `api`)
        self._labels = None  # label cache; reset to None after label mutations
        self._issues = None  # issue cache (all states), filled lazily
        self._lock = threading.RLock()  # guards lazy label/issue caches
        self._milestones = None  # milestone cache (see `milestones`)
    def _log_info(self, s):
        # Intentionally a no-op: logging hook kept for call-site compatibility.
        pass
    @property
    def api(self):
        """Lazily created PyGithub ``Repository`` for ``self.fullname``."""
        if self._repoclient is None:
            self._repoclient = self.client.get_repo(self.fullname)
        return self._repoclient
    @property
    def name(self):
        """Repository name without the owner prefix."""
        return self.fullname.split("/", 1)[-1]
    @property
    def type(self):
        """Coarse repository category derived from naming conventions."""
        if self.name in ["home"]:
            return "home"
        elif self.name.startswith("proj"):
            return "proj"
        elif self.name.startswith("org_"):
            return "org"
        elif self.name.startswith("www"):
            return "www"
        elif self.name.startswith("doc"):
            return "doc"
        elif self.name.startswith("cockpit"):
            return "cockpit"
        else:
            return "code"
    @property
    def labelnames(self):
        """Names (str) of all labels currently on the repository."""
        return [item.name for item in self.labels]
    @property
    def labels(self):
        """Cached list of PyGithub label objects (fetched once, lock-guarded)."""
        with self._lock:
            if self._labels is None:
                self._labels = [item for item in self.api.get_labels()]
            return self._labels
    @property
    def branches(self):
        """list of `Branch` objects"""
        return list(self.api.get_branches())
    @property
    def stories(self):
        # walk over all issues and find the stories (based on type)
        return self.issues_by_type("story")
    @property
    def tasks(self):
        # walk over all issues and find the tasks (based on type)
        return self.issues_by_type("task")
def labelsSet(self, labels2set, ignoreDelete=["p_"], delete=True):
"""
@param ignore all labels starting with ignore will not be deleted
"""
for item in labels2set:
if not isinstance(item, str):
raise Exception("Labels to set need to be in string format, found:%s" % labels2set)
# walk over github existing labels
labelstowalk = copy.copy(self.labels)
for item in labelstowalk:
name = item.name.lower()
if name not in labels2set:
# label in repo does not correspond to label we need
if name in replacelabels:
nameNew = replacelabels[item.name.lower()]
if nameNew not in self.labelnames:
color = self.get_color(name)
self._log_info(
"change label in repo: %s oldlabel:'%s' to:'%s' color:%s"
% (self.fullname, item.name, nameNew, color)
)
item.edit(nameNew, color)
self._labels = None
else:
# no replacement
name = "type_unknown"
color = self.get_color(name)
try:
item.edit(name, color)
except BaseException:
item.delete()
self._labels = None
# walk over new labels we need to set
for name in labels2set:
if name not in self.labelnames:
# does not exist yet in repo
color = self.get_color(name)
self._log_info("create label: %s %s %s" % (self.fullname, name, color))
self.api.create_label(name, color)
self._labels = None
name = ""
if delete:
labelstowalk = copy.copy(self.labels)
for item in labelstowalk:
if item.name not in labels2set:
self._log_info("delete label: %s %s" % (self.fullname, item.name))
ignoreDeleteDo = False
for filteritem in ignoreDelete:
if item.name.startswith(filteritem):
ignoreDeleteDo = True
if ignoreDeleteDo is False:
item.delete()
self._labels = None
# check the colors
labelstowalk = copy.copy(self.labels)
for item in labelstowalk:
# we recognise the label
self._log_info("check color of repo:%s labelname:'%s'" % (self.fullname, item.name))
color = self.get_color(item.name)
if item.color != color:
self._log_info("change label color for repo %s %s" % (item.name, color))
item.edit(item.name, color)
self._labels = None
def getlabel(self, name):
for item in self.labels:
self._log_info("%s:look for name:'%s'" % (item.name, name))
if item.name == name:
return item
raise Exception("Dit not find label: '%s'" % name)
    def get_issue_from_markdown(self, issueNumber, markdown):
        """Build an Issue from pre-rendered markdown and cache it.

        :param issueNumber: issue number the markdown belongs to
        :param markdown: markdown source loaded into the issue
        :return: the cached Issue instance
        """
        i = self.get_issue(issueNumber, False)
        i._loadMD(markdown)
        self.issues.append(i)
        return i
    def get_issue(self, issueNumber, die=True):
        """Return the issue with the given number.

        Looks in the local cache first, then asks the GitHub API. When the
        issue cannot be found: raise if ``die`` is True, otherwise return a
        fresh Issue carrying only the number.
        """
        for issue in self.issues:
            if issue.number == issueNumber:
                return issue
        # not found in cache, try to load from github
        github_issue = self.api.get_issue(issueNumber)
        if github_issue:
            issue = Issue(repo=self, githubObj=github_issue)
            self._issues.append(issue)
            return issue
        if die:
            raise Exception("cannot find issue:%s in repo:%s" % (issueNumber, self))
        else:
            i = Issue(self)
            i._ddict["number"] = issueNumber
            return i
def issues_by_type(self, *types):
"""
filter is method which takes issue as argument and returns True or False to include
"""
issues = []
for issue in self.issues:
if issue.type in types:
issues.append(issue)
return issues
def issues_by_state(self, filter=None):
"""
filter is method which takes issue as argument and returns True or False to include
"""
res = {}
for item in self.states:
res[item] = []
for issue in self.issues:
if issue.state == item:
if filter is None or filter(issue):
res[item].append(issue)
return res
def issues_by_priority(self, filter=None):
"""
filter is method which takes issue as argument and returns True or False to include
"""
res = {}
for item in self.priorities:
res[item] = []
for issue in self.issues:
if issue.priority == item:
if filter is None or filter(issue):
res[item].append(issue)
return res
def issues_by_type_state(self, filter=None, collapsepriority=True):
"""
filter is method which takes issue as argument and returns True or False to include
returns dict of dict keys: type, state and then issues sorted following priority
"""
res = {}
for type in self.types:
res[type] = {}
for state in self.states:
res[type][state] = {}
for priority in self.priorities:
res[type][state][priority] = []
for issue in self.issues:
if issue.type == type and issue.state == state:
if filter is None or filter(issue):
res[type][state][priority].append(issue)
if collapsepriority:
# sort the issues following priority
temp = res[type][state]
res[type][state] = []
for priority in self.priorities:
for subitem in temp[priority]:
res[type][state].append(subitem)
return res
    @property
    def types(self):
        """Known issue type names (see :data:`GithubRepo.TYPES`)."""
        return GithubRepo.TYPES
    @property
    def priorities(self):
        """Known priority names, highest first (see :data:`GithubRepo.PRIORITIES`)."""
        return GithubRepo.PRIORITIES
    @property
    def states(self):
        """Known issue state names (see :data:`GithubRepo.STATES`)."""
        return GithubRepo.STATES
    @property
    def milestones(self):
        """Cached list of :class:`RepoMilestone` wrappers (fetched once)."""
        if self._milestones is None:
            self._milestones = [RepoMilestone(self, x) for x in self.api.get_milestones()]
        return self._milestones
    @property
    def milestone_titles(self):
        """Titles of all milestones."""
        return [item.title for item in self.milestones]
    @property
    def milestone_names(self):
        """Short names of all milestones."""
        return [item.name for item in self.milestones]
def get_milestone(self, name, die=True):
name = name.strip()
if name == "":
raise Exception("Name cannot be empty.")
for item in self.milestones:
if name == item.name.strip() or name == item.title.strip():
return item
if die:
raise Exception("Could not find milestone with name:%s" % name)
else:
return None
    @retry
    def create_milestone(self, name, title, description="", deadline="", owner=""):
        """Create the milestone, or update it when one matching ``name`` or
        ``title`` already exists.

        The milestone body follows a convention: description plus
        machine-readable ``## name:`` / ``## owner:`` trailer lines.
        ``deadline`` is currently not applied (see the commented-out code).
        """
        self._log_info('Attempt to create milestone "%s" [%s] deadline %s' % (name, title, deadline))
        def getBody(descr, name, owner):
            # Canonical milestone body: description + name/owner trailers.
            out = "%s\n\n" % descr
            out += "## name:%s\n" % name
            out += "## owner:%s\n" % owner
            return out
        ms = None
        for s in [name, title]:
            ms = self.get_milestone(s, die=False)
            if ms is not None:
                break
        if ms is not None:
            # Existing milestone: sync title and body when they drifted.
            if ms.title != title:
                ms.title = title
            # if ms.deadline != deadline:
            #     ms.deadline = deadline
            tocheck = getBody(description.strip(), name, owner)
            if ms.body.strip() != tocheck.strip():
                ms.body = tocheck
        else:
            # due = j.data.time.epoch2pythonDateTime(int(j.data.time.getEpochFuture(deadline)))
            self._log_info("Create milestone on %s: %s" % (self, title))
            body = getBody(description.strip(), name, owner)
            # workaround for https://github.com/PyGithub/PyGithub/issues/396
            milestone = self.api.create_milestone(title=title, description=body)
            milestone.edit(title=title)
            self._milestones.append(RepoMilestone(self, milestone))
    def delete_milestone(self, name):
        """Delete the milestone called ``name`` (logs and continues when missing).

        :raises Exception: when ``name`` is empty/whitespace
        """
        if name.strip() == "":
            raise Exception("Name cannot be empty.")
        self._log_info("Delete milestone on %s: '%s'" % (self, name))
        try:
            ms = self.get_milestone(name)
            ms.api.delete()
            self._milestones = []  # invalidate cache after deletion
        except Exception:
            self._log_info("Milestone '%s' doesn't exist. no need to delete" % name)
def _labelsubset(self, cat):
res = []
for item in self.labels:
if item.startswith(cat):
item = item[len(cat) :].strip("_")
res.append(item)
res.sort()
return res
def get_color(self, name):
# colors={'state_question':'fbca04',
# 'priority_urgent':'d93f0b',
# 'state_verification':'006b75',
# 'priority_minor':'',
# 'type_task':'',
# 'type_feature':'',
# 'process_wontfix':"ffffff",
# 'priority_critical':"b60205",
# 'state_inprogress':"e6e6e6",
# 'priority_normal':"e6e6e6",
# 'type_story':"ee9a00",
# 'process_duplicate':"",
# 'state_closed':"5319e7",
# 'type_bug':"fc2929",
# 'state_accepted':"0e8a16",
# 'type_question':"fbca04",
# 'state_new':"1d76db"}
if name.startswith("state"):
return "c2e0c6" # light green
if name.startswith("process"):
return "d4c5f9" # light purple
if name.startswith("type"):
return "fef2c0" # light yellow
if name in ("priority_critical", "task_no_estimation"):
return "b60205" # red
if name.startswith("priority_urgent"):
return "d93f0b"
if name.startswith("priority"):
return "f9d0c4" # roze
return "ffffff"
@retry
def set_file(self, path, content, message="update file"):
"""
Creates or updates the file content at path with given content
:param path: file path `README.md`
:param content: Plain content of file
:return:
"""
bytes = content.encode()
encoded = base64.encodebytes(bytes)
params = {"message": message, "content": encoded.decode()}
path = urllib.parse.quote(path)
try:
obj = self.api.get_contents(path)
params["sha"] = obj.sha
if base64.decodebytes(obj.content.encode()) == bytes:
return
except UnknownObjectException:
pass
self._log_info('Updating file "%s"' % path)
self.api._requester.requestJsonAndCheck("PUT", self.api.url + "/contents/" + path, input=params)
    @property
    def issues(self):
        """Cached list of all issues (every state), fetched once, lock-guarded."""
        with self._lock:
            if self._issues is None:
                issues = []
                for item in self.api.get_issues(state="all"):
                    issues.append(Issue(self, githubObj=item))
                self._issues = issues
            return self._issues
def download_directory(self, src, download_dir, branch=None):
dest = j.sals.fs.join_paths(download_dir, self.api.full_name)
j.sals.fs.mkdirs(dest)
branch = branch or self.api.default_branch
contents = self.api.get_dir_contents(src, ref=branch)
for content in | |
@raise PDBParseError: if the stream has no HEADER at byte 0
"""
self._stream.seek(0)
header = next(self._stream)
if not header.startswith('HEADER'):
raise PDBParseError('Does not look like a regular PDB file.')
structure = csb.bio.structure.Structure(header.split()[-1])
while True:
try:
line = next(self._stream)
except StopIteration:
break
if line.startswith('COMPND'):
if line[10:].lstrip().startswith('MOL_ID:'):
mol_id = int(line[18:].replace(';', '').strip())
chain_name = ''
chains = ''
while line.startswith('COMPND'):
line = next(self._stream)
if line.split()[2].startswith('MOLECULE:'):
chain_name += line[20:].strip()
while not chain_name.endswith(';'):
line = next(self._stream)
if not line.startswith('COMPND'):
break
chain_name += ' ' + line[11:].strip()
else:
while not line.split()[2].startswith('CHAIN:'):
line = next(self._stream)
if not line.startswith('COMPND'):
raise HeaderFormatError('Missing chain identifier in COMPND section')
chains = line[17:].strip()
while not chains.endswith(';'):
line = next(self._stream)
if not line.startswith('COMPND'):
break
chains += ', ' + line[11:].strip()
break
chain_ids = chains.replace(';', ' ').replace(',', ' ').split() or [''] # the second part deals with an empty chain id
self._add_chains(structure, chain_name, mol_id, *chain_ids)
elif line.startswith('REMARK 2 RESOLUTION'):
structure.resolution = self._read_resolution(line)
elif line.startswith('SEQRES'):
chain_id, residues = self._parse_seqres_line(line, structure)
chain = structure.chains[chain_id]
for residue in residues:
chain.residues.append(residue)
if chain.residues.last_index != residue.rank:
raise HeaderFormatError("Malformed SEQRES")
elif line.startswith('MODEL') or line.startswith('ATOM'):
break
return structure
    def _add_chains(self, structure, name, mol_id, *chain_ids):
        """Register one chain per identifier on ``structure`` (COMPND data).

        @param structure: structure being built
        @param name: molecule name from the COMPND MOLECULE record
        @param mol_id: MOL_ID of the compound the chains belong to
        @param chain_ids: chain identifiers to create
        @raise HeaderFormatError: if a chain id is already defined
        """
        name = name.strip().rstrip(";")
        for chain in chain_ids:
            # Sequence type is unknown until SEQRES/ATOM data is seen.
            new_chain = csb.bio.structure.Chain(chain, type=SequenceTypes.Unknown,
                                                name=name, accession=structure.accession)
            new_chain.molecule_id = mol_id
            try:
                structure.chains.append(new_chain)
            except csb.bio.structure.DuplicateChainIDError:
                raise HeaderFormatError('Chain {0} is already defined.'.format(new_chain.id))
def _read_resolution(self, line):
"""
@return: resolution
@rtype: float or None
"""
res = re.search("(\d+(?:\.\d+)?)\s+ANGSTROM", line)
if res and res.groups():
return float(res.group(1))
else:
return None
    def _parse_seqres_line(self, line, structure):
        """
        Parse a SEQRES line, build and return newly constructed residues.
        If the current sequence type of the chain is unknown, try to guess it
        before parsing the residues.
        @return: parsed chain_id and L{Residue}s
        @rtype: 2-tuple: (str, iterable of L{Residue})
        @raise HeaderFormatError: if the chain id is not declared on the structure
        """
        residues = []
        rownum = int(line[7:10])        # SEQRES serial number (1-based row)
        chain_id = line[11].strip()
        labels = line[18:].split()      # up to 13 residue labels per row
        if chain_id not in structure.chains:
            raise HeaderFormatError('Chain {0} is undefined'.format(chain_id))
        chain = structure.chains[chain_id]
        if chain.type == SequenceTypes.Unknown:
            chain.type = self.guess_chain_type(labels)
        for rn, label in enumerate(labels):
            # Global 1-based residue rank: each SEQRES row carries 13 slots.
            rank = rownum * 13 - (13 - (rn + 1))
            rtype = self.parse_residue_safe(label, as_type=chain.type)
            residue = csb.bio.structure.Residue.create(chain.type, rank=rank, type=rtype)
            residue.label = label
            residues.append(residue)
        return chain_id, residues
class PDBHeaderParser(RegularStructureParser):
    """
    Ultra fast PDB HEADER parser. Does not read any structural data.
    """
    def _parse_atoms(self, structure, model):
        # Header-only parser: skip the ATOM records entirely.
        pass
    def _parse_ss(self, structure):
        # No secondary structure parsing either.
        pass
    def _parse_header(self, model):
        # Header parsing itself is inherited unchanged.
        return super(PDBHeaderParser, self)._parse_header(model)
class LegacyStructureParser(AbstractStructureParser):
    """
    This is a customized PDB parser, which is designed to read both sequence and
    atom data from the ATOM section. This is especially useful when parsing PDB
    files without a header.
    """
    def _parse_header(self, model):
        """
        Initialize a structure with residues from the ATOMs section.
        @param model: model identifier (e.g. if multiple models exist)
        @type model: str
        @return: a L{csb.bio.structure.Structure} instance with properly
        initialized residues from ATOMs under the specified C{model}.
        @rtype: L{csb.bio.structure.Structure}
        @raise HeaderFormatError: when the file contains no ATOM records
        """
        self._stream.seek(0)
        in_atom = False
        has_atoms = False
        has_model = False
        # chain id -> ordered {(seq number, insertion code) -> residue label};
        # collected while scanning ATOMs, materialized in _build_chain below.
        chains = csb.core.OrderedDict()
        header = next(self._stream)
        if header.startswith('HEADER'):
            structure = csb.bio.structure.Structure(header.split()[-1])
        else:
            # Headerless file: rewind so the first line is re-read below.
            self._stream.seek(0)
            structure = csb.bio.structure.Structure('NONE')
        structure.model_id = None
        while True:
            try:
                line = next(self._stream)
            except StopIteration:
                break
            if line.startswith('MODEL'):
                if has_model:
                    # second MODEL record: only one model is read
                    break
                else:
                    self._parse_model_line(line, structure, model)
                    model = structure.model_id
                    has_model = True
            elif line.startswith('ATOM') \
                     or (in_atom and line.startswith('HETATM')):
                # HETATMs count only while inside an ATOM run (before TER).
                in_atom = True
                has_atoms = True
                seq_number = self._read_sequence_number(line)
                ins_code = self._read_insertion_code(line)
                residue_id = (seq_number, ins_code)
                label = self._read_residue_raw(line)
                chain_id = self._read_chain_id(line)
                if chain_id not in chains:
                    chains[chain_id] = csb.core.OrderedDict()
                    self._add_chain(structure, chain_id)
                if residue_id not in chains[chain_id]:
                    chains[chain_id][residue_id] = label
                    chain = structure.chains[chain_id]
                    if chain.type == SequenceTypes.Unknown:
                        self._fix_chain(chain, label)
            elif in_atom and line.startswith('TER'):
                in_atom = False
            elif line.startswith('ENDMDL'):
                break
            elif line.startswith('END'):
                break
        if not has_atoms:
            raise HeaderFormatError("Can't parse legacy structure: no ATOMs found")
        for chain in structure.items:
            self._build_chain(chain, chains[chain.id])
        return structure
    def _add_chain(self, structure, chain_id):
        # Register a new, empty chain of yet-unknown sequence type.
        new_chain = csb.bio.structure.Chain(chain_id,
                                            type=SequenceTypes.Unknown,
                                            accession=structure.accession)
        new_chain.molecule_id = '1'
        structure.chains.append(new_chain)
    def _build_chain(self, chain, residues):
        # Turn the collected {residue id -> label} map into Residue objects,
        # ranked sequentially in collection order.
        for residue_id, label in residues.items():
            rank = (chain.residues.last_index or 0) + 1
            rname = self.parse_residue_safe(label, as_type=chain.type)
            residue = csb.bio.structure.Residue.create(chain.type, rank=rank, type=rname)
            residue.label = label
            residue.id = residue_id
            chain.residues.append(residue)
    def _fix_chain(self, chain, probe):
        # Guess the chain type from the first residue label seen; leave it
        # Unknown when the label is not recognized.
        try:
            chain.type = self.guess_sequence_type(probe)
        except UnknownPDBResidueError:
            pass
    def _map_residues(self, structure, residues):
        # Residues were built from the same ATOM records, so mapping is
        # direct: find each residue by (seq number, ins code), attach atoms.
        for chain in structure.items:
            for residue_info in residues[chain.id]:
                try:
                    residue = chain.find(residue_info.sequence_number, residue_info.insertion_code)
                    for atom in residue_info.atoms:
                        residue.atoms.append(atom)
                except csb.bio.structure.EntityNotFoundError:
                    pass
StructureParser = AbstractStructureParser.create_parser
"""
Alias for L{AbstractStructureParser.create_parser}.
"""
class ResidueInfo(object):
    """
    High-performance struct acting as a container for L{Atom}s that are not
    yet mapped onto a chain.

    @note: structurally compatible with the L{csb.bio.sequence.ResidueInfo}
           interface without inheriting from it: plain C{__slots__} fields
           are cheaper than properties, and on the abstract level this
           object is_a ResidueInfo used to build L{AbstractSequence}s.
    """
    __slots__ = ['chain', 'rank', 'id', 'sequence_number', 'insertion_code', 'type', 'label', 'atoms']
    def __init__(self, chain, rank, id, seq_number, ins_code, type, label):
        self.atoms = []
        self.chain = chain
        self.rank = rank
        self.id = id
        self.sequence_number = seq_number
        self.insertion_code = ins_code
        self.type = type
        self.label = label
    @property
    def is_modified(self):
        # Nucleic types render their canonical label via str(); all other
        # alphabets via repr(). A mismatch with the raw PDB label means the
        # residue is modified.
        if self.type.enum is SequenceAlphabets.Nucleic:
            expected = str(self.type)
        else:
            expected = repr(self.type)
        return self.label != expected
class SparseChainSequence(csb.bio.sequence.ChainSequence):
    """
    Sequence view for reference (SEQRES) or sparse (ATOM) PDB chains.
    The residue instances passed to the constructor must be
    L{csb.bio.structure.Residue} or L{csb.bio.io.wwpdb.ResidueInfo} objects.
    See L{csb.bio.sequence.AbstractSequence} for details.
    """
    def _add(self, residue):
        # Accept only the two residue flavors this view is designed for.
        if not isinstance(residue, (csb.bio.structure.Residue, ResidueInfo)):
            raise TypeError(residue)
        else:
            self._residues.append(residue)
    def _get(self, rank):
        # Ranks are 1-based; the backing list is 0-based.
        return self._residues[rank - 1]
    @staticmethod
    def create(chain):
        """
        Create a new L{SparseChainSequence} from existing L{Chain}.
        @type chain: L{csb.bio.structure.Chain}
        @rtype: L{SparseChainSequence}
        """
        return SparseChainSequence(
            chain.entry_id, chain.header, chain.residues, chain.type)
class AbstractResidueMapper(object):
    """
    Defines the base interface of all residue mappers, used to align PDB ATOM
    records to the real (SEQRES) sequence of a chain.
    """
    # NOTE(review): this is the Python 2 metaclass idiom; on Python 3 it is a
    # plain class attribute and @abstractmethod is NOT enforced — confirm
    # whether `class ...(metaclass=ABCMeta)` was intended.
    __metaclass__ = ABCMeta
    @abstractmethod
    def map(self, sparse, reference):
        """
        Map L{sparse}'s residues to L{reference}. Return all C{sparse} residues,
        aligned over C{reference}, with artificial gap residues inserted at
        relevant positions. The resulting sequence of sparse residues will
        always have the same length as the C{reference} sequence.
        @note: C{sparse}'s ranks won't be touched because the C{rank} property
        of the underlying L{ResidueInfo} implementation is not necessarily r/w.
        @param sparse: sparse sequence (e.g. derived from ATOMS records)
        @type sparse: L{SparseChainSequence}
        @param reference: reference, complete sequence
        (e.g. derived from SEQRES records)
        @type reference: L{SparseChainSequence}
        @return: all C{sparse} residues, optimally aligned over C{reference}
        (with gaps)
        @rtype: L{SparseChainSequence}
        @raise ResidueMappingError: if the specified sequences are not alignable
        """
        pass
    def create_gap(self, alphabet=SequenceAlphabets.Protein):
        """
        Create and return a new gap residue.
        @param alphabet: sequence alphabet; a member of L{SequenceAlphabets}
        which has GAP item
        @type alphabet: L{enum}
        @rtype: L{ResidueInfo}
        """
        return ResidueInfo(None, -1, None, None, None, alphabet.GAP, "-")
    def _build(self, sparse, aligned):
        # Wrap the aligned residues in a sequence carrying sparse's identity.
        return SparseChainSequence(
            sparse.id, sparse.header, aligned, sparse.type)
class FastResidueMapper(AbstractResidueMapper):
"""
RegExp-based residue mapper. Fails on heavily malformed input (i.e. it cannot
insert gaps in the C{reference}), but it is very fast (linear) and memory
efficient.
"""
MAX_FRAGMENTS = 20
MIN_UNICODE_CHAR = 300
FORBIDDEN_CHARS = set('^.*?()-')
CODEC = "utf-8"
DELIMITER = ").*?(".encode(CODEC).decode(CODEC)
PATTERN = "^.*?({0}).*?$".encode(CODEC).decode(CODEC)
    def __init__(self):
        # Next free unicode code point used when encoding residues as single
        # characters; starts above the ASCII range (MIN_UNICODE_CHAR = 300),
        # presumably so encoded characters cannot collide with the regex
        # metacharacters used in PATTERN/DELIMITER -- TODO confirm against
        # the encoding helpers (not visible here).
        self._charcode = FastResidueMapper.MIN_UNICODE_CHAR
        # Cache of already-assigned encodings
        # (NOTE(review): presumably keyed by residue type; verify usage)
        self._cache = {}
def map(self, sparse, reference):
aligned = []
mapping = {}
residues = list(sparse.residues)
pattern = self._build_pattern(residues)
seqres = self._encode_sequence(reference)
matches = re.match(pattern, seqres)
if matches:
unmapped_item = -1
for fn, fragment in enumerate(matches.groups(), start=1):
assert fragment != ''
for offset in range(1, len(fragment) + 1):
unmapped_item += 1
rank = matches.start(fn) + offset
mapped_residue = residues[unmapped_item]
real_residue = reference.residues[rank]
assert real_residue.type == mapped_residue.type
mapping[real_residue] = mapped_residue
else:
raise ResidueMappingError("Can't map ATOM records")
for rank, residue in enumerate(reference.residues, start=1):
if residue in mapping:
aligned.append(mapping[residue])
else:
aligned.append(self.create_gap(sparse.alphabet))
assert len(aligned) == reference.length
| |
+ dm_name
latex_name += "," + dm_latex_name
self._name = name + ")"
self._latex_name = latex_name + r"\right)"
self._vmodule = vector_field_module
self._degree = degree
# the member self._ring is created for efficiency (to avoid calls to
# self.base_ring()):
self._ring = domain.scalar_field_algebra()
Parent.__init__(self, base=self._ring, category=Modules(self._ring))
self._domain = domain
self._dest_map = dest_map
self._ambient_domain = vector_field_module._ambient_domain
# NB: self._zero_element is not constructed here, since no element
# can be constructed here, to avoid some infinite recursion.
#### Parent methods
    def _element_constructor_(self, comp=[], frame=None, name=None,
                              latex_name=None):
        r"""
        Construct a differential form.

        TESTS::

            sage: M = Manifold(2, 'M')
            sage: U = M.open_subset('U'); V = M.open_subset('V')
            sage: c_xy.<x,y> = U.chart(); c_uv.<u,v> = V.chart()
            sage: M.declare_union(U,V)
            sage: A = M.diff_form_module(2)
            sage: a = A([[0, x*y], [-x*y, 0]], name='a'); a
            2-form a on the 2-dimensional differentiable manifold M
            sage: a.display(c_xy.frame())
            a = x*y dx∧dy
            sage: A(0) is A.zero()
            True
        """
        # NOTE: the mutable default ``comp=[]`` is harmless here because
        # ``comp`` is only read, never mutated.
        try:
            # Scalar-field-like input that is identically zero -> zero form
            if comp.is_trivial_zero():
                return self.zero()
        except AttributeError:
            # ``comp`` has no is_trivial_zero(); fall back to comparing with 0
            if comp == 0:
                return self.zero()
        if isinstance(comp, (DiffForm, DiffFormParal)):
            # coercion by domain restriction
            if (self._degree == comp._tensor_type[1]
                    and self._domain.is_subset(comp._domain)
                    and self._ambient_domain.is_subset(comp._ambient_domain)):
                return comp.restrict(self._domain)
            else:
                raise TypeError("cannot convert the {} ".format(comp) +
                                "to an element of {}".format(self))
        if isinstance(comp, TensorField):
            # coercion of a tensor of type (0,1) to a linear form
            tensor = comp  # for readability
            if (tensor.tensor_type() == (0,1) and self._degree == 1
                    and tensor._vmodule is self._vmodule):
                resu = self.element_class(self._vmodule, 1, name=tensor._name,
                                          latex_name=tensor._latex_name)
                # Convert each known restriction into a 1-form on its domain
                for dom, rst in tensor._restrictions.items():
                    resu._restrictions[dom] = dom.diff_form_module(1)(rst)
                return resu
            else:
                raise TypeError("cannot convert the {} ".format(tensor) +
                                "to an element of {}".format(self))
        if not isinstance(comp, (list, tuple)):
            raise TypeError("cannot convert the {} ".format(comp) +
                            "to an element of {}".format(self))
        # standard construction from a (possibly empty) list of components
        resu = self.element_class(self._vmodule, self._degree, name=name,
                                  latex_name=latex_name)
        if comp:
            # Set the components in the given frame (default frame if None)
            resu.set_comp(frame)[:] = comp
        return resu
    def _an_element_(self):
        r"""
        Construct some (unnamed) differential form.

        TESTS::

            sage: M = Manifold(2, 'M')
            sage: U = M.open_subset('U'); V = M.open_subset('V')
            sage: c_xy.<x,y> = U.chart(); c_uv.<u,v> = V.chart()
            sage: M.declare_union(U,V)
            sage: A = M.diff_form_module(2)
            sage: A._an_element_()
            2-form on the 2-dimensional differentiable manifold M
        """
        resu = self.element_class(self._vmodule, self._degree)
        for oc in self._domain.open_covers(trivial=False):
            # the first non-trivial open cover is selected: each of its
            # members contributes an arbitrary element of its own
            # differential form module as a restriction of ``resu``
            for dom in oc:
                vmodule_dom = dom.vector_field_module(
                    dest_map=self._dest_map.restrict(dom))
                dmodule_dom = vmodule_dom.dual_exterior_power(self._degree)
                resu.set_restriction(dmodule_dom._an_element_())
            # return from inside the outer loop: only the first cover is used
            return resu
        # no non-trivial open cover: return the bare (component-less) form
        return resu
def _coerce_map_from_(self, other):
r"""
Determine whether coercion to ``self`` exists from other parent.
TESTS::
sage: M = Manifold(3, 'M')
sage: A1 = M.diff_form_module(1)
sage: A1._coerce_map_from_(M.tensor_field_module((0,1)))
True
sage: A2 = M.diff_form_module(2)
sage: A2._coerce_map_from_(M.tensor_field_module((0,2)))
False
sage: U = M.open_subset('U')
sage: A2U = U.diff_form_module(2)
sage: A2U._coerce_map_from_(A2)
True
sage: A2._coerce_map_from_(A2U)
False
"""
if isinstance(other, (DiffFormModule, DiffFormFreeModule)):
# coercion by domain restriction
return (self._degree == other._degree
and self._domain.is_subset(other._domain)
and self._ambient_domain.is_subset(other._ambient_domain))
from sage.manifolds.differentiable.tensorfield_module import TensorFieldModule
if isinstance(other, TensorFieldModule):
# coercion of a type-(0,1) tensor to a linear form
return (self._vmodule is other._vmodule and self._degree == 1
and other.tensor_type() == (0,1))
return False
    @cached_method
    def zero(self):
        """
        Return the zero of ``self``.

        EXAMPLES::

            sage: M = Manifold(3, 'M')
            sage: A2 = M.diff_form_module(2)
            sage: A2.zero()
            2-form zero on the 3-dimensional differentiable manifold M
        """
        zero = self._element_constructor_(name='zero', latex_name='0')
        # Register components in every frame that is compatible with the
        # destination map of ``self``
        for frame in self._domain._frames:
            if self._dest_map.restrict(frame._domain) == frame._dest_map:
                zero.add_comp(frame)
                # (since new components are initialized to zero)
        zero._is_zero = True  # This element is certainly zero
        # Freeze it: the cached zero must never be modified in place
        zero.set_immutable()
        return zero
#### End of Parent methods
def _repr_(self):
r"""
Return a string representation of the object.
TESTS::
sage: M = Manifold(3, 'M')
sage: A2 = M.diff_form_module(2)
sage: A2
Module Omega^2(M) of 2-forms on
the 3-dimensional differentiable manifold M
"""
description = "Module "
if self._name is not None:
description += self._name + " "
description += "of {}-forms ".format(self._degree)
if self._dest_map is self._domain.identity_map():
description += "on the {}".format(self._domain)
else:
description += "along the {} mapped into the {}".format(
self._domain, self._ambient_domain)
return description
def _latex_(self):
r"""
Return a LaTeX representation of the object.
TESTS::
sage: M = Manifold(3, 'M', latex_name=r'\mathcal{M}')
sage: A2 = M.diff_form_module(2)
sage: A2._latex_()
'\\Omega^{2}\\left(\\mathcal{M}\\right)'
sage: latex(A2) # indirect doctest
\Omega^{2}\left(\mathcal{M}\right)
"""
if self._latex_name is None:
return r'\mbox{' + str(self) + r'}'
else:
return self._latex_name
    def base_module(self):
        r"""
        Return the vector field module on which the differential form module
        ``self`` is constructed.

        OUTPUT:

        - a
          :class:`~sage.manifolds.differentiable.vectorfield_module.VectorFieldModule`
          representing the module on which ``self`` is defined

        EXAMPLES::

            sage: M = Manifold(3, 'M')
            sage: A2 = M.diff_form_module(2) ; A2
            Module Omega^2(M) of 2-forms on the 3-dimensional differentiable
             manifold M
            sage: A2.base_module()
            Module X(M) of vector fields on the 3-dimensional differentiable
             manifold M
            sage: A2.base_module() is M.vector_field_module()
            True
            sage: U = M.open_subset('U')
            sage: A2U = U.diff_form_module(2) ; A2U
            Module Omega^2(U) of 2-forms on the Open subset U of the
             3-dimensional differentiable manifold M
            sage: A2U.base_module()
            Module X(U) of vector fields on the Open subset U of the
             3-dimensional differentiable manifold M
        """
        # Simple accessor: ``self._vmodule`` is set at construction time.
        return self._vmodule
    def degree(self):
        r"""
        Return the degree of the differential forms in ``self``.

        OUTPUT:

        - integer `p` such that ``self`` is a set of `p`-forms

        EXAMPLES::

            sage: M = Manifold(3, 'M')
            sage: M.diff_form_module(1).degree()
            1
            sage: M.diff_form_module(2).degree()
            2
            sage: M.diff_form_module(3).degree()
            3
        """
        # Simple accessor: the degree is fixed in __init__ and never changes.
        return self._degree
# *****************************************************************************
class DiffFormFreeModule(ExtPowerDualFreeModule):
r"""
Free module of differential forms of a given degree `p` (`p`-forms) along
a differentiable manifold `U` with values on a parallelizable manifold `M`.
Given a differentiable manifold `U` and a differentiable map
`\Phi:\; U \rightarrow M` to a parallelizable manifold `M` of dimension
`n`, the set `\Omega^p(U, \Phi)` of `p`-forms along `U` with values on `M`
is a free module of rank `\binom{n}{p}` over `C^k(U)`, the commutative
algebra of differentiable scalar fields on `U` (see
:class:`~sage.manifolds.differentiable.scalarfield_algebra.DiffScalarFieldAlgebra`).
The standard case of `p`-forms *on* a differentiable manifold `M`
corresponds to `U = M` and `\Phi = \mathrm{Id}_M`. Other common cases are
`\Phi` being an immersion and `\Phi` being a curve in `M` (`U` is then an
open interval of `\RR`).
.. NOTE::
This class implements `\Omega^p(U, \Phi)` in the case where `M` is
parallelizable; `\Omega^p(U, \Phi)` is then a *free* module. If `M`
is not parallelizable, the class :class:`DiffFormModule` must be used
instead.
INPUT:
- ``vector_field_module`` -- free module `\mathfrak{X}(U,\Phi)` of vector
fields along `U` associated with the map `\Phi: U \rightarrow V`
- ``degree`` -- positive integer; the degree `p` of the differential forms
EXAMPLES:
Free module of 2-forms on a parallelizable 3-dimensional manifold::
sage: M = Manifold(3, 'M')
sage: X.<x,y,z> = M.chart()
sage: XM = M.vector_field_module() ; XM
Free module X(M) of vector fields on the 3-dimensional differentiable
manifold M
sage: A = M.diff_form_module(2) ; A
Free module Omega^2(M) of 2-forms on the 3-dimensional differentiable
manifold M
sage: latex(A)
\Omega^{2}\left(M\right)
``A`` is nothing but the second exterior power of the dual of ``XM``, i.e.
we have `\Omega^{2}(M) = \Lambda^2(\mathfrak{X}(M)^*)` (see
:class:`~sage.tensor.modules.ext_pow_free_module.ExtPowerDualFreeModule`)::
sage: A is XM.dual_exterior_power(2)
True
`\Omega^{2}(M)` is a module over the algebra `C^k(M)` of (differentiable)
scalar fields on `M`::
sage: A.category()
Category of finite dimensional modules over Algebra of differentiable
scalar fields on the 3-dimensional differentiable manifold M
sage: CM = M.scalar_field_algebra() ; CM
Algebra of differentiable scalar fields on the 3-dimensional
differentiable manifold M
sage: A in Modules(CM)
True
sage: A.base_ring()
Algebra of differentiable scalar fields on
the 3-dimensional differentiable manifold M
sage: A.base_module()
Free module X(M) of vector fields on
the 3-dimensional differentiable manifold M
sage: A.base_module() is XM
True
sage: A.rank()
3
Elements can be constructed from `A`. In particular, ``0`` yields
the zero element of `A`::
sage: A(0)
2-form zero on the 3-dimensional differentiable manifold M
sage: A(0) is A.zero()
True
while non-zero elements are constructed by providing their components
in a given vector frame::
sage: comp = [[0,3*x,-z],[-3*x,0,4],[z,-4,0]]
sage: a = A(comp, frame=X.frame(), name='a') ; a
2-form a on the 3-dimensional differentiable manifold M
sage: a.display()
a = 3*x dx∧dy - z dx∧dz + 4 dy∧dz
An alternative is to construct the 2-form from an empty list of
components and to set the nonzero nonredundant components afterwards::
sage: a = A([], name='a')
sage: a[0,1] = 3*x # component in the manifold's default frame
sage: a[0,2] = -z
sage: a[1,2] = 4
sage: a.display()
| |
(0 0, 1 1)>
"""
if not all(np.isscalar(val) for val in [xmin, ymin, xmax, ymax]):
raise TypeError("xmin/ymin/xmax/ymax only accepts scalar values")
return lib.clip_by_rect(
geometry,
np.double(xmin),
np.double(ymin),
np.double(xmax),
np.double(ymax),
**kwargs
)
@multithreading_enabled
def convex_hull(geometry, **kwargs):
    """Returns the smallest convex geometry that contains an input geometry.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> convex_hull(Geometry("MULTIPOINT (0 0, 10 0, 10 10)"))
    <pygeos.Geometry POLYGON ((0 0, 10 10, 10 0, 0 0))>
    >>> convex_hull(Geometry("POLYGON EMPTY"))
    <pygeos.Geometry GEOMETRYCOLLECTION EMPTY>
    """
    # Thin wrapper: all work happens in the C ufunc.
    return lib.convex_hull(geometry, **kwargs)
@multithreading_enabled
def delaunay_triangles(geometry, tolerance=0.0, only_edges=False, **kwargs):
    """Computes a Delaunay triangulation around the vertices of an input
    geometry.

    The output is a GeometryCollection containing polygons (default)
    or linestrings (see only_edges). Returns None if an input geometry
    contains fewer than 3 vertices.

    Parameters
    ----------
    geometry : Geometry or array_like
    tolerance : float or array_like, default 0.0
        Snap input vertices together if their distance is less than this value.
    only_edges : bool or array_like, default False
        If set to True, the triangulation will return a collection of
        linestrings instead of polygons.
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> points = Geometry("MULTIPOINT (50 30, 60 30, 100 100)")
    >>> delaunay_triangles(points)
    <pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((50 30, 60 30, 100 100, 50 30)))>
    >>> delaunay_triangles(points, only_edges=True)
    <pygeos.Geometry MULTILINESTRING ((50 30, 100 100), (50 30, 60 30), (60 30, ...>
    >>> delaunay_triangles(Geometry("MULTIPOINT (50 30, 51 30, 60 30, 100 100)"), tolerance=2)
    <pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((50 30, 60 30, 100 100, 50 30)))>
    >>> delaunay_triangles(Geometry("POLYGON ((50 30, 60 30, 100 100, 50 30))"))
    <pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((50 30, 60 30, 100 100, 50 30)))>
    >>> delaunay_triangles(Geometry("LINESTRING (50 30, 60 30, 100 100)"))
    <pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((50 30, 60 30, 100 100, 50 30)))>
    >>> delaunay_triangles(Geometry("GEOMETRYCOLLECTION EMPTY"))
    <pygeos.Geometry GEOMETRYCOLLECTION EMPTY>
    """
    return lib.delaunay_triangles(geometry, tolerance, only_edges, **kwargs)
@multithreading_enabled
def envelope(geometry, **kwargs):
    """Returns the minimum bounding box enclosing an input geometry.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> envelope(Geometry("LINESTRING (0 0, 10 10)"))
    <pygeos.Geometry POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))>
    >>> envelope(Geometry("MULTIPOINT (0 0, 10 0, 10 10)"))
    <pygeos.Geometry POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))>
    >>> envelope(Geometry("POINT (0 0)"))
    <pygeos.Geometry POINT (0 0)>
    >>> envelope(Geometry("GEOMETRYCOLLECTION EMPTY"))
    <pygeos.Geometry POINT EMPTY>
    """
    # Thin wrapper: all work happens in the C ufunc.
    return lib.envelope(geometry, **kwargs)
@multithreading_enabled
def extract_unique_points(geometry, **kwargs):
    """Collects the distinct vertices of an input geometry into a multipoint.

    Only the first two dimensions of the vertices are considered when
    testing for equality.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> extract_unique_points(Geometry("POINT (0 0)"))
    <pygeos.Geometry MULTIPOINT (0 0)>
    >>> extract_unique_points(Geometry("LINESTRING(0 0, 1 1, 1 1)"))
    <pygeos.Geometry MULTIPOINT (0 0, 1 1)>
    >>> extract_unique_points(Geometry("POLYGON((0 0, 1 0, 1 1, 0 0))"))
    <pygeos.Geometry MULTIPOINT (0 0, 1 0, 1 1)>
    >>> extract_unique_points(Geometry("MULTIPOINT (0 0, 1 1, 0 0)"))
    <pygeos.Geometry MULTIPOINT (0 0, 1 1)>
    >>> extract_unique_points(Geometry("LINESTRING EMPTY"))
    <pygeos.Geometry MULTIPOINT EMPTY>
    """
    # Thin wrapper: all work happens in the C ufunc.
    return lib.extract_unique_points(geometry, **kwargs)
@requires_geos("3.8.0")
@multithreading_enabled
def build_area(geometry, **kwargs):
    """Forms an areal geometry from the constituent linework of the input.

    Equivalent of the PostGIS ST_BuildArea() function.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> build_area(Geometry("GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), POLYGON((1 1, 1 2, 2 2, 1 1)))"))
    <pygeos.Geometry POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 2, 1 2, 1 1))>
    """
    # Thin wrapper: all work happens in the C ufunc (requires GEOS >= 3.8).
    return lib.build_area(geometry, **kwargs)
@requires_geos("3.8.0")
@multithreading_enabled
def make_valid(geometry, **kwargs):
    """Repairs an invalid geometry, returning a valid equivalent.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> make_valid(Geometry("POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))"))
    <pygeos.Geometry MULTILINESTRING ((0 0, 1 1), (1 1, 1 2))>
    """
    # Thin wrapper: all work happens in the C ufunc (requires GEOS >= 3.8).
    return lib.make_valid(geometry, **kwargs)
@multithreading_enabled
def normalize(geometry, **kwargs):
    """Brings a geometry into normal (canonical) form.

    The coordinates, the rings of a polygon and the parts of multi
    geometries are ordered consistently. Typically useful for testing
    purposes (for example in combination with ``equals_exact``).

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> p = Geometry("MULTILINESTRING((0 0, 1 1),(2 2, 3 3))")
    >>> normalize(p)
    <pygeos.Geometry MULTILINESTRING ((2 2, 3 3), (0 0, 1 1))>
    """
    # Thin wrapper: all work happens in the C ufunc.
    return lib.normalize(geometry, **kwargs)
@multithreading_enabled
def point_on_surface(geometry, **kwargs):
    """Returns a point that intersects the input geometry.

    Parameters
    ----------
    geometry : Geometry or array_like
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Examples
    --------
    >>> point_on_surface(Geometry("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"))
    <pygeos.Geometry POINT (5 5)>
    >>> point_on_surface(Geometry("LINESTRING (0 0, 2 2, 10 10)"))
    <pygeos.Geometry POINT (2 2)>
    >>> point_on_surface(Geometry("MULTIPOINT (0 0, 10 10)"))
    <pygeos.Geometry POINT (0 0)>
    >>> point_on_surface(Geometry("POLYGON EMPTY"))
    <pygeos.Geometry POINT EMPTY>
    """
    # Thin wrapper: all work happens in the C ufunc.
    return lib.point_on_surface(geometry, **kwargs)
def polygonize(geometries, **kwargs):
    """Creates polygons formed from the linework of a set of Geometries.

    Polygonizes an array of Geometries that contain linework which
    represents the edges of a planar graph. Any type of Geometry may be
    provided as input; only the constituent lines and rings will be used to
    create the output polygons.

    Lines or rings that when combined do not completely close a polygon
    will result in an empty GeometryCollection. Duplicate segments are
    ignored.

    This function returns the polygons within a GeometryCollection.
    Individual Polygons can be obtained using ``get_geometry`` to get
    a single polygon or ``get_parts`` to get an array of polygons.
    MultiPolygons can be constructed from the output using
    ``pygeos.multipolygons(pygeos.get_parts(pygeos.polygonize(geometries)))``.

    Parameters
    ----------
    geometries : array_like
        An array of geometries.
    axis : int
        Axis along which the geometries are polygonized.
        The default is to perform a reduction over the last dimension
        of the input array. A 1D array results in a scalar geometry.
        (Keyword-only; forwarded to the underlying ufunc via ``**kwargs``.)
    **kwargs
        For other keyword-only arguments, see the
        `NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.

    Returns
    -------
    GeometryCollection or array of GeometryCollections

    See Also
    --------
    get_parts, get_geometry
    polygonize_full

    Examples
    --------
    >>> lines = [
    ...     Geometry("LINESTRING (0 0, 1 1)"),
    ...     Geometry("LINESTRING (0 0, 0 1)"),
    ...     Geometry("LINESTRING (0 1, 1 1)"),
    ... ]
    >>> polygonize(lines)
    <pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))>
    """
    return lib.polygonize(geometries, **kwargs)
def polygonize_full(geometries, **kwargs):
"""Creates polygons formed from the linework of a set of Geometries and
return all extra outputs as well.
Polygonizes an array of Geometries that contain linework which
represents the edges of a planar graph. Any type of Geometry may be
provided as input; only the constituent lines and rings will be used to
create the output polygons.
This function performs the same polygonization as ``polygonize`` but does
not only return the polygonal result but all extra outputs as well. The
return value consists of 4 elements:
* The polygonal valid output
* **Cut edges**: edges connected on both ends but not part of polygonal output
* **dangles**: edges connected on one end but not part of polygonal output
* **invalid rings**: polygons formed but which are not valid
This function returns the geometries within GeometryCollections.
Individual geometries can be obtained using ``get_geometry`` to get
a single geometry or ``get_parts`` to get an array of geometries.
Parameters
----------
geometries : array_like
An array of geometries.
axis : int
Axis along which the geometries are polygonized.
The default is to perform a reduction over the last dimension
of the input array. A 1D array results in a scalar geometry.
**kwargs
For other keyword-only arguments, see the
`NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.
Returns
-------
(polgyons, cuts, dangles, invalid)
tuple of 4 GeometryCollections or arrays of GeometryCollections
See Also
--------
polygonize
Examples
--------
>>> lines = [
... Geometry("LINESTRING (0 0, 1 1)"),
... Geometry("LINESTRING (0 0, 0 1, 1 1)"),
... Geometry("LINESTRING (0 1, 1 1)"),
... ]
>>> polygonize_full(lines) # doctest: +NORMALIZE_WHITESPACE
(<pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((1 1, 0 | |
shoot_opt = {
'steps': self.steps,
'displacement': self.displacement,
'voxel_size': voxel_size,
'absolute': self.absolute,
'membrane': self.membrane,
'bending': self.bending,
'lame': self.lame,
'factor': self.factor,
}
greens_prm = {
'absolute': self.absolute,
'membrane': self.membrane,
'bending': self.bending,
'lame': self.lame,
'factor': self.factor,
'voxel_size': voxel_size,
'shape': velocity.shape[1:-1],
}
if self.cache_greens:
if getattr(self, '_greens_prm', None) == greens_prm:
greens = self._greens.to(**utils.backend(velocity))
else:
greens = spatial.greens(**greens_prm, **utils.backend(velocity))
self._greens = greens
self._greens_prm = greens_prm
else:
greens = spatial.greens(**greens_prm, **utils.backend(velocity))
shoot_fn = spatial.shoot_approx if self.approx else spatial.shoot
output = []
if inv:
y, iy = shoot_fn(velocity, greens, return_inverse=True, **shoot_opt)
if fwd:
output.append(y)
output.append(iy)
elif fwd:
y = shoot_fn(velocity, greens, **shoot_opt)
output.append(y)
return output if len(output) > 1 else \
output[0] if len(output) == 1 else \
None
class AffineExp(Module):
    """Exponentiate an infinitesimal affine transformation (Lie algebra)."""
    def __init__(self, dim, basis='CSO', fwd=True, inv=False):
        """
        Parameters
        ----------
        dim : {1, 2, 3}
            Spatial dimension
        basis : basis_like or list[basis_like], default='CSO'
            The simplest way to define an affine basis is to choose from
            a list of Lie groups:
            * 'T'   : Translations
            * 'SO'  : Special Orthogonal (rotations)
            * 'SE'  : Special Euclidean (translations + rotations)
            * 'D'   : Dilations (translations + isotropic scalings)
            * 'CSO' : Conformal Special Orthogonal
                      (translations + rotations + isotropic scalings)
            * 'SL'  : Special Linear (rotations + isovolumic zooms + shears)
            * 'GL+' : General Linear [det>0] (rotations + zooms + shears)
            * 'Aff+': Affine [det>0] (translations + rotations + zooms + shears)
            More complex (hierarchical) encodings can be achieved as well.
            See `affine_matrix`.
        fwd : bool, default=True
            Return the forward transformation.
        inv : bool, default=False
            Return the inverse transformation.
        """
        super().__init__()
        self.dim = dim
        self.basis = spatial.build_affine_basis(basis, dim)
        self.fwd = fwd
        self.inv = inv
    def forward(self, prm, fwd=None, inv=None):
        """
        Parameters
        ----------
        prm : (batch, nb_prm) tensor or list[tensor]
            Affine parameters on the Lie algebra.
        fwd : bool, optional
            Override ``self.fwd`` for this call.
        inv : bool, optional
            Override ``self.inv`` for this call.

        Returns
        -------
        forward : (batch, dim+1, dim+1) tensor, optional
            Forward matrix
        inverse : (batch, dim+1, dim+1) tensor, optional
            Inverse matrix
        """
        do_fwd = self.fwd if fwd is None else fwd
        do_inv = self.inv if inv is None else inv
        matrices = []
        if do_fwd:
            matrices.append(spatial.affine_matrix(prm, self.basis))
        if do_inv:
            # exp(-X) is the exact inverse of exp(X): negate the Lie
            # parameters and exponentiate again.
            if isinstance(prm, (list, tuple)):
                neg_prm = [-p for p in prm]
            else:
                neg_prm = -prm
            matrices.append(spatial.affine_matrix(neg_prm, self.basis))
        if len(matrices) > 1:
            return matrices
        if matrices:
            return matrices[0]
        return None
class AffineLog(Module):
    """Take the Riemannian logarithm of an affine (recovers Lie algebra).

    Note
    ----
    The matrix logarithm is currently only implemented on cpu
    and is not parallelized across batches (we just call scipy).
    This layer is therefore quite slow and triggers data transfers
    between cpu and cuda devices. Hopefully it is not too big a
    bottleneck (affine matrices are quite small in size).
    """
    def __init__(self, dim, basis='CSO', **backend):
        """
        Parameters
        ----------
        dim : int
            Number of spatial dimensions
        basis : basis_like or list[basis_like], default='CSO'
            The simplest way to define an affine basis is to choose from
            a list of Lie groups:
            * 'T'   : Translations
            * 'SO'  : Special Orthogonal (rotations)
            * 'SE'  : Special Euclidean (translations + rotations)
            * 'D'   : Dilations (translations + isotropic scalings)
            * 'CSO' : Conformal Special Orthogonal
                      (translations + rotations + isotropic scalings)
            * 'SL'  : Special Linear (rotations + isovolumic zooms + shears)
            * 'GL+' : General Linear [det>0] (rotations + zooms + shears)
            * 'Aff+': Affine [det>0] (translations + rotations + zooms + shears)
            More complex (hierarchical) encodings can be achieved as well.
            See `affine_matrix`.
        """
        super().__init__()
        self.basis = spatial.build_affine_basis(basis, dim, **backend)
    def forward(self, affine):
        """
        Parameters
        ----------
        affine : (batch, dim+1, dim+1) tensor

        Returns
        -------
        logaff : (batch, nbprm) tensor
        """
        # A well-conditioned affine has a real logarithm, so any imaginary
        # part is numerical noise and is dropped. (An alternative would be
        # to regularise the affine by loading the diagonal until it is well
        # conditioned, but it is unclear how that would play with autograd.)
        in_backend = utils.backend(affine)
        logmat = core.linalg.logm(affine.double())
        if logmat.is_complex():
            logmat = logmat.real
        logmat = logmat.to(**in_backend)
        basis = self.basis.to(**in_backend)
        # Project the log-matrix onto the Lie basis to recover parameters.
        return core.linalg.mdot(logmat[:, None, ...], basis[None, ...])
class AffineClassic(Module):
    """Build an affine by matrix multiplication of individual affines."""
    def __init__(self, dim, basis='CSO', logzooms=False):
        """
        Parameters
        ----------
        dim : {2, 3}
            Spatial dimension
        basis : str, default='CSO'
            Chosen from a list of Lie groups:
            * 'T'   : Translations
            * 'SO'  : Special Orthogonal (rotations)
            * 'SE'  : Special Euclidean (translations + rotations)
            * 'D'   : Dilations (translations + isotropic scalings)
            * 'CSO' : Conformal Special Orthogonal
                      (translations + rotations + isotropic scalings)
            * 'GL+' : General Linear [det>0] (rotations + zooms + shears)
            * 'Aff+': Affine [det>0] (translations + rotations + zooms + shears)
        logzooms : bool, default=False
            If True, this function will exponentiate the input zoom parameters.
        """
        super().__init__()
        self.dim = dim
        self.basis = basis
        self.logzooms = logzooms
    def forward(self, prm):
        """
        Parameters
        ----------
        prm : (batch, nb_prm) tensor or list[tensor]
            Affine parameters, ordered as
            (*translations, *rotations, *zooms, *shears).

        Returns
        -------
        affine : (batch, dim+1, dim+1) tensor
            Affine matrix

        Raises
        ------
        ValueError
            If the number of parameters does not match the chosen group,
            or if the group is unknown.
        """
        def checkdim(expected, got):
            # Fail fast with an explicit message when the parameter count
            # does not match the chosen group.
            if got != expected:
                raise ValueError(f'Expected {expected} parameters for '
                                 f'group {self.basis}({self.dim}) but '
                                 f'got {got}.')

        dim = self.dim
        nb_rot = dim * (dim - 1) // 2  # number of rotation (and shear) parameters
        nb_prm = prm.shape[-1]
        eps = core.constants.eps(prm.dtype)

        def makepos(zooms):
            # Map raw zoom parameters to strictly positive scalings.
            return zooms.exp() if self.logzooms else zooms.clamp_min(eps)

        if self.basis == 'T':
            checkdim(dim, nb_prm)
        elif self.basis == 'SO':
            checkdim(nb_rot, nb_prm)
        elif self.basis == 'SE':
            checkdim(dim + nb_rot, nb_prm)
        elif self.basis == 'D':
            checkdim(dim + 1, nb_prm)
            translations = prm[..., :dim]
            zooms = prm[..., -1]
            # Broadcast the single isotropic zoom to all spatial dimensions.
            zooms = makepos(zooms.expand([*zooms.shape, dim]))
            prm = torch.cat((translations, zooms), dim=-1)
        elif self.basis == 'CSO':
            checkdim(dim + nb_rot + 1, nb_prm)
            rigid = prm[..., :-1]
            zooms = prm[..., -1]
            zooms = makepos(zooms.expand([*zooms.shape, dim]))
            prm = torch.cat((rigid, zooms), dim=-1)
        elif self.basis == 'GL+':
            # GL+ = rotations + zooms + shears
            #     = nb_rot + dim + nb_rot = dim**2 parameters.
            # BUGFIX: the previous check used (dim-1)*(dim+1) == dim**2 - 1,
            # which is inconsistent with the slicing below and with the
            # 'Aff+' count (dim*(dim+1)) minus the dim translations.
            checkdim(dim * dim, nb_prm)
            rigid = prm[..., :nb_rot]
            zooms = makepos(prm[..., nb_rot:nb_rot + dim])
            strides = prm[..., nb_rot + dim:]
            prm = torch.cat((rigid, zooms, strides), dim=-1)
        elif self.basis == 'Aff+':
            checkdim(dim * (dim + 1), nb_prm)
            rigid = prm[..., :dim + nb_rot]
            zooms = makepos(prm[..., dim + nb_rot:2 * dim + nb_rot])
            strides = prm[..., 2 * dim + nb_rot:]
            prm = torch.cat((rigid, zooms, strides), dim=-1)
        else:
            raise ValueError(f'Unknown basis {self.basis}')
        return spatial.affine_matrix_classic(prm, dim=self.dim)
class AffineClassicInverse(Module):
    """Recover affine parameters from an affine matrix."""
    def __init__(self, basis='CSO', logzooms=False):
        """
        Parameters
        ----------
        basis : str, default='CSO'
            Chosen from a list of Lie groups:
            * 'T'   : Translations
            * 'SO'  : Special Orthogonal (rotations)
            * 'SE'  : Special Euclidean (translations + rotations)
            * 'D'   : Dilations (translations + isotropic scalings)
            * 'CSO' : Conformal Special Orthogonal
                      (translations + rotations + isotropic scalings)
            * 'GL+' : General Linear [det>0] (rotations + zooms + shears)
            * 'Aff+': Affine [det>0] (translations + rotations + zooms + shears)
        logzooms : bool, default=False
            If True, this function will return the logarithm of the zooms.
        """
        super().__init__()
        self.basis = basis
        self.logzooms = logzooms
    def forward(self, affine):
        """
        Parameters
        ----------
        affine : (batch, dim+1, dim+1) tensor
            Affine matrix

        Returns
        -------
        prm : (batch, nb_prm) tensor
            Parameters
        """
        # Decompose into translations / rotations / zooms / shears, then
        # keep only the components that belong to the chosen group.
        T, R, Z, S = spatial.affine_parameters_classic(affine,
                                                       return_stacked=False)
        if self.logzooms:
            Z = Z.log()
        basis = self.basis
        if basis == 'T':
            return T
        if basis == 'SO':
            return R
        if basis == 'SE':
            return torch.cat((T, R), dim=-1)
        if basis in ('D', 'CSO'):
            # Isotropic groups carry a single zoom: average over dimensions.
            iso = torch.mean(Z, dim=-1)[..., None]
            if basis == 'D':
                return torch.cat((T, iso), dim=-1)
            return torch.cat((T, R, iso), dim=-1)
        if basis == 'GL+':
            return torch.cat((R, Z, S), dim=-1)
        if basis == 'Aff+':
            return torch.cat((T, R, Z, S), dim=-1)
        raise ValueError(f'Unknown basis {self.basis}')
class AffineGrid(Module):
"""Generate a dense grid from an affine transform."""
def __init__(self, shape=None, shift=False):
"""
Parameters
----------
shape : sequence[int], optional
Output shape of the dense grid.
shift : bool, default=False
Compose the affine with a shift so that the origin is in
the center of the output field of view.
| |
<reponame>sodicarus/channels
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand-pureita / XBMC Plugin
# <NAME>
# http://www.mimediacenter.info/foro/viewtopic.php?f=36&t=7808
# ------------------------------------------------------------
import base64
import re
import urlparse
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
# Channel identifier used by the plugin framework to route Item actions
# back to this module.
__channel__ = "tantifilm"
# Base URL of the scraped site; all listing URLs below are built from it.
host = "https://www.tantifilm.video"
# Default HTTP headers for scraper requests: desktop Firefox user agent,
# gzip support and the site itself as referer.
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
           ['Accept-Encoding', 'gzip, deflate'],
           ['Referer', host]]
def mainlist(item):
    """Build the channel's static top-level menu.

    Each entry is an Item whose ``action`` names the handler function in
    this module invoked when the entry is selected.
    """
    logger.info("[streamondemand-pureita tantifilm] mainlist")
    itemlist = [Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Menu' >>>[/COLOR]",
                     action="menu_movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_new.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Novita'[/COLOR]",
                     action="peliculas",
                     url="%s/watch-genre/al-cinema/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_new_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Ultimi inseriti[/COLOR]",
                     action="peliculas_last",
                     url="%s/film/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movies_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Menu' >>>[/COLOR]",
                     action="menu_tvshow",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_new.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Novita[/COLOR]",
                     extra="series",
                     action="peliculas_tv",
                     url="%s/watch-genre/series-tv-featured/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Ultimi Episodi[/COLOR]",
                     extra="series",
                     action="peliculas_series",
                     url="%s/aggiornamenti-giornalieri-serie-tv/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
                Item(channel=__channel__,
                     title="[COLOR yellow][I]Cerca Film ...[/I][/COLOR]",
                     action="search",
                     extra="series",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png"),
                Item(channel=__channel__,
                     title="[COLOR yellow][I]Cerca Serie...[/I][/COLOR]",
                     action="search",
                     extra="serie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png")]
    return itemlist
# ==================================================================================================================================================
def menu_movie(item):
    """Build the static film sub-menu (cinema, latest, HD, 3D, sub-ita,
    categories, search)."""
    logger.info("[streamondemand-pureita tantifilm] menu_movie")
    itemlist = [Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Al Cinema[/COLOR]",
                     action="peliculas",
                     url="%s/watch-genre/al-cinema/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_cinema_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Ultimi inseriti[/COLOR]",
                     action="peliculas_last",
                     url="%s/film/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_new_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Alta Definizione[/COLOR]",
                     action="peliculas",
                     url="%s/watch-genre/altadefinizione/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/hd_movies_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]3D[/COLOR]",
                     action="peliculas",
                     url="%s/watch-genre/3d/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_3D_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Sub-ITA[/COLOR]",
                     action="peliculas",
                     url="%s/watch-genre/sub-ita/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_sub_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Categorie[/COLOR]",
                     action="categorias",
                     url=host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genres_P.png"),
                Item(channel=__channel__,
                     title="[COLOR yellow][I]Cerca ...[/I][/COLOR]",
                     action="search",
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png")]
    return itemlist
# ==================================================================================================================================================
def menu_tvshow(item):
    """Build the static TV-series / anime sub-menu."""
    logger.info("[streamondemand-pureita tantifilm] menu_tvshow")
    itemlist = [Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Aggiornamenti per data[/COLOR]",
                     extra="series",
                     action="cat_date",
                     url="%s/aggiornamenti-giornalieri-serie-tv/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Ultime Episodi[/COLOR]",
                     extra="anime",
                     action="peliculas_series",
                     url="%s/aggiornamenti-giornalieri-serie-tv/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Novita'[/COLOR]",
                     extra="series",
                     action="peliculas_tv",
                     url="%s/watch-genre/series-tv-featured/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]HD[/COLOR]",
                     extra="series",
                     action="peliculas_tv",
                     url="%s/watch-genre/serie-altadefinizione/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_series_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Mini Serie[/COLOR]",
                     extra="series",
                     action="peliculas_tv",
                     url="%s/watch-genre/miniserie/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_series_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Anime - [COLOR orange]Novita'[/COLOR]",
                     extra="anime",
                     action="peliculas_tv",
                     url="%s/watch-genre/anime/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/animation_P.png"),
                Item(channel=__channel__,
                     title="[COLOR yellow][I]Cerca...[/I][/COLOR]",
                     action="search",
                     extra="series",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png")]
    return itemlist
# ==================================================================================================================================================
def categorias(item):
    """List the film genre categories scraped from the site's side menu."""
    logger.info("[streamondemand-pureita tantifilm] categorias")
    # Download the page and keep only the menu section after the "Anime"
    # entry, which is where the genre links live.
    page = httptools.downloadpage(item.url).data
    bloque = scrapertools.get_match(page, '</span>Anime</a></li>(.*?)</ul>')
    genre_links = re.compile("<li><a href='(.*?)'><span></span>(.*?)</a>",
                             re.DOTALL).findall(bloque)
    itemlist = []
    for genre_url, genre_title in genre_links:
        # TV-series categories are handled by the dedicated TV menus.
        if "Serie" in genre_title or "Miniserie" in genre_title:
            continue
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR azure]" + genre_title + "[/COLOR]",
                 url=genre_url,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genre_P.png",
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def peliculas(item):
    """Parse a film listing page into playable movie Items.

    Scrapes the "mediaWrap" card layout for (url, thumbnail, title,
    quality) and appends a pagination entry when a next-page link exists.
    """
    logger.info("[streamondemand-pureita tantifilm] peliculas")
    itemlist = []
    # Download the listing page.
    data = httptools.downloadpage(item.url, headers=headers).data
    # Card pattern: link, poster image, title paragraph, then a second
    # paragraph holding the quality label (e.g. "HD720").
    patron = '<div class="mediaWrap mediaWrapAlt"><a href="([^"]+)">'
    patron += '<img[^>]+src="([^"]+)"\s*class[^>]+[^>]+></a><div[^>]+>[^>]+><p>([^<]+)</p>'
    patron += '.*?<p>\s*([^<]+)\s*</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, quality in matches:
        # Protocol-relative thumbnails ("//...") get an https scheme.
        if not "http" in scrapedthumbnail:
            scrapedthumbnail = "https:" + scrapedthumbnail
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle=scrapedtitle.replace("streaming", "")
        if quality:
            quality=" ([COLOR yellow]" + quality.strip() + "[/COLOR])"
            quality=quality.replace("HD720", "HD")
        else:
            quality=""
        scrapedplot = ""
        # infoSod enriches the Item with TMDB metadata.
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]" + quality,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 plot=scrapedplot,
                 show=scrapedtitle), tipo="movie"))
    # Pagination: follow the "nextpostslink" anchor if present.
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
def peliculas_last(item):
    """Parse the "Ultimi Film" (latest films) page into movie Items.

    This page uses the "title-film" layout, different from the card
    layout handled by ``peliculas``, so pagination must come back here.
    """
    logger.info("[streamondemand-pureita tantifilm] peliculas_last")
    itemlist = []
    # Download the page and isolate the "Ultimi Film" section.
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.get_match(data, 'Ultimi Film</h1>(.*?)<h3>Navigation</h3>')
    patron = '<img width="\d+" height="\d+" src="([^"]+)" class=[^>]+>\s*'
    patron += '<\/a>\s*<div class="title-film">\s*<a href="([^"]+)" title[^>]+>'
    patron += '<p>([^<]+)<\/p>.*?<p>\s*([^<]+)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedthumbnail, scrapedurl, scrapedtitle, quality in matches:
        # Protocol-relative thumbnails ("//...") get an https scheme.
        if not "http" in scrapedthumbnail:
            scrapedthumbnail = "https:" + scrapedthumbnail
        # Percent-encode typographic characters the site leaves raw.
        scrapedthumbnail = scrapedthumbnail.replace("’", "%E2%80%99").replace("-–-", "-%E2%80%93-")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("streaming", "")
        scrapedplot = ""
        if quality:
            quality = "([COLOR yellow]" + quality.strip() + "[/COLOR])"
            quality = quality.replace("HD720", "HD")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]" + quality,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 plot=scrapedplot,
                 show=scrapedtitle), tipo="movie"))
    # Pagination. FIX: the next page of /film/ uses this same "Ultimi
    # Film" layout, so recurse into peliculas_last (the original routed
    # to "peliculas", whose card regex does not match these pages).
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_last",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
def cat_date(item):
    """List the per-date sections of the daily TV-series updates page."""
    logger.info("[streamondemand-pureita tantifilm] cat_date")
    # Each collapsible section header ("sp-head") is one update date.
    page = httptools.downloadpage(item.url).data
    section_titles = re.compile(
        '<div class="sp-head" title="Expand">\s*([^<]+)\s*</div>',
        re.DOTALL).findall(page)
    itemlist = []
    for section_title in section_titles:
        # Skip the generic series/miniseries buckets.
        if "Serie" in section_title or "Miniserie" in section_title:
            continue
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_series",
                 title="[COLOR yellow]" + section_title + "[/COLOR]",
                 url=item.url,
                 fulltitle=section_title,
                 show=section_title,
                 extra="date",
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png",
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def peliculas_series(item):
    """Parse a daily-updates section into series Items, 28 per page.

    Client-side pagination: the page number is appended to the url after
    a '{}' marker, split off again on re-entry.
    """
    logger.info("[streamondemand-pureita tantifilm] peliculas_series")
    itemlist = []
    minpage = 28  # entries shown per virtual page
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Download the page.
    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict to the section whose header matches the chosen date.
    bloque = scrapertools.get_match(data, '%s(.*?)</div>\s*</div>' % item.fulltitle)
    patron = '<p>([^<]+)<a href="([^"]+)" target="_blank" rel="noopener">([^<]+)<\/a><\/p>'
    if item.extra == "date":
        matches = re.compile(patron, re.DOTALL).findall(bloque)
    else:
        matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedtitle, scrapedurl, ep) in enumerate(matches):
        # Keep only the entries belonging to virtual page p.
        if (p - 1) * minpage > i: continue
        if i >= p * minpage: break
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("streaming –", "").replace("’", "'")
        scrapedtitle = scrapedtitle.replace("-)", ")")
        # FIX: was 'scrapedititle = scrapedtitle.strip()' — the typo
        # assigned to a dead variable and the strip() was discarded.
        scrapedtitle = scrapedtitle.strip()
        ep = " ([COLOR orange]" + ep + "[/COLOR])"
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 contentType="tv",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]" + ep,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle), tipo='tv'))
    # Emit the next virtual page if this one was full.
    if len(matches) >= p * minpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="peliculas_series",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def peliculas_tv(item):
    """Parse a TV-series listing page into series/anime Items."""
    logger.info("[streamondemand-pureita tantifilm] peliculas_tv")
    itemlist = []
    # Download the listing page.
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<div class="mediaWrap mediaWrapAlt"><a href="([^"]+)">'
    patron += '<img[^>]+src="([^"]+)"\s*class[^>]+[^>]+></a><div[^>]+>[^>]+><p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # FIX: the original tested for "https:" but prepended "http:",
        # turning plain-http URLs into "http:http://...". Normalize the
        # same way as the sibling parsers: only protocol-relative
        # ("//...") thumbnails get a scheme.
        if not "http" in scrapedthumbnail:
            scrapedthumbnail = "https:" + scrapedthumbnail
        scrapedthumbnail = scrapedthumbnail.replace("’", "%E2%80%99").replace("-–-", "-%E2%80%93-")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("streaming", "").replace("’", "'")
        scrapedtitle = scrapedtitle.replace("-)", ")").replace("–", "-")
        scrapedplot = ""
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios" if not "anime" in item.extra else "episodios_anime",
                 contentType="tv",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 fulltitle=scrapedtitle,
                 plot=scrapedplot,
                 show=scrapedtitle), tipo="tv"))
    # Pagination.
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas_tv",
                 extra=item.extra,
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
# ==================================================================================================================================================
def search(item, texto):
    """Global-search entry point: query the site and parse the results."""
    logger.info("[streamondemand-pureita tantifilm] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas_search(item)
    except:
        # Deliberately swallow any failure so one broken channel does not
        # abort the global search; log the exception info for debugging.
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
# ==================================================================================================================================================
def peliculas_search(item):
    """Parse a search-results page into movie or series Items.

    The genre class of each result ("genre-...") decides whether it is
    routed to findvideos (movie) or episodios (series).
    """
    logger.info("[streamondemand-pureita tantifilm] peliculas_search")
    itemlist = []
    # Download the results page.
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<div class=".*?genre-(.*?)"\s*id[^>]+>\s*[^>]+>.*?'
    patron += '<a href="([^"]+)"\s*title="(.*?)"\s*rel="bookmark">\s*<img[^>]+src="([^"]+)"[^>]+>.*?<p>(.*?)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for genre, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in matches:
        # FIX: the original tested for "http:", which is NOT a substring
        # of "https://...", so https thumbnails were double-prefixed
        # ("https:https://..."). Test for "http" like the sibling
        # parsers so only protocol-relative URLs get a scheme.
        if not "http" in scrapedthumbnail:
            scrapedthumbnail = "https:" + scrapedthumbnail
        scrapedthumbnail = scrapertools.decodeHtmlentities(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("streaming", "")
        scrapedtitle = scrapedtitle.replace("Permalink to", "")
        scrapedtitle = scrapedtitle.replace("-)", ")").replace("’", "'").replace("–", "-")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos" if not "serie" in genre else "episodios",
                 contentType="movie" if not "serie" in genre else "tv",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 plot=scrapedplot,
                 show=scrapedtitle), tipo="movie" if not "serie" in genre else "tv"))
    return itemlist
# ==================================================================================================================================================
def episodios(item):
def load_episodios(html, item, itemlist):
for data in html.splitlines():
# Extrae las entradas
end = data.find('<a ')
if end > 0:
scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
else:
scrapedtitle = ''
if scrapedtitle == '':
patron = '<a\s*href="[^"]+"(?:\s*target="_blank)?>([^<]+)</a>'
scrapedtitle = scrapertools.find_single_match(data, patron).strip()
title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+')
if title == '':
title = scrapedtitle.replace("–", "")
if title != '':
itemlist.append(
Item(channel=__channel__,
action="findvideos_tv",
title="[COLOR azure]" + title + "[/COLOR]",
url=item.url,
thumbnail=item.thumbnail,
extra=data,
fulltitle=title + " - " + item.fulltitle,
plot= "[COLOR orange]" + item.fulltitle + "[/COLOR] " + item.plot,
show=title + " - " + item.show))
logger.info("[streamondemand-pureita tantifilm] episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
data = scrapertools.decodeHtmlentities(data)
start = data.find('<div class="sp-wrap sp-wrap-blue">')
end = data.find('<div id="disqus_thread">', start)
data_sub = data[start:end]
starts = []
patron = r".*?Stagione|STAGIONE|MINISERIE|WEBSERIE|SERIE"
matches = re.compile(patron, re.IGNORECASE).finditer(data_sub)
for match in matches:
season_title = match.group()
if season_title != '':
starts.append(match.end())
i = 1
len_starts = len(starts)
while i <= len_starts:
inizio = starts[i - 1]
fine = starts[i] if i < len_starts else -1
html = data_sub[inizio:fine]
load_episodios(html, item, itemlist)
i | |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, <NAME>, <NAME> and <NAME>
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
This module is to compute the various fingerprints based on the provided
fingerprint system. If you have any question please contact me via email.
2016.11.15
@author: <NAME> and <NAME>
Email: <EMAIL> and <EMAIL>
##############################################################################
"""
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem import MACCSkeys
from rdkit.Chem import AllChem
from rdkit import Chem
from rdkit.Chem.AtomPairs import Pairs
from rdkit.Chem.AtomPairs import Torsions
from rdkit import DataStructs
from .estate import CalculateEstateFingerprint as EstateFingerprint
import pybel
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.Pharm2D.SigFactory import SigFactory
from rdkit.Chem.Pharm2D import Generate
from .ghosecrippen import GhoseCrippenFingerprint
from .PubChemFingerprints import calcPubChemFingerAll
# Module version.
Version=1.0
# Names of all similarity measures exposed by RDKit's DataStructs
# (e.g. Tanimoto, Dice, ...); see CalculateSimilarityRdkit.
similaritymeasure=[i[0] for i in DataStructs.similarityFunctions]
################################################################
def CalculateFP2Fingerprint(mol):
    """Compute the Openbabel FP2 path-based fingerprint (1024 bits).

    Input: mol is a pybel molecule object.
    Output: a tuple (number of fingerprint bits, dict mapping each set
    bit position to 1).
    """
    on_bits = dict.fromkeys(mol.calcfp().bits, 1)
    return 1024, on_bits
def CalculateFP3Fingerprint(mol):
    """Compute the Openbabel FP3 SMARTS-based fingerprint (210 bits).

    Input: mol is a pybel molecule object.
    Output: a tuple (number of fingerprint bits, dict mapping each set
    bit position to 1).
    """
    on_bits = dict.fromkeys(mol.calcfp('FP3').bits, 1)
    return 210, on_bits
def CalculateFP4Fingerprint(mol):
    """Compute the Openbabel FP4 functional-group fingerprint (307 bits).

    Input: mol is a pybel molecule object.
    Output: a tuple (number of fingerprint bits, dict mapping each set
    bit position to 1).
    """
    on_bits = dict.fromkeys(mol.calcfp('FP4').bits, 1)
    return 307, on_bits
def CalculateDaylightFingerprint(mol):
    """Compute the Daylight-like topological fingerprint (2048 bits).

    Input: mol is an RDKit molecule object.
    Output: a tuple (number of bits, dict mapping each set bit position
    to 1, RDKit bit vector for similarity calculations).
    """
    bit_vector = FingerprintMols.FingerprintMol(mol)
    on_bits = dict.fromkeys(bit_vector.GetOnBits(), 1)
    return 2048, on_bits, bit_vector
def CalculateMACCSFingerprint(mol):
    """Compute the MACCS keys fingerprint (166 bits).

    Input: mol is an RDKit molecule object.
    Output: a tuple (number of bits, dict mapping each set bit position
    to 1, RDKit bit vector for similarity calculations).
    """
    bit_vector = MACCSkeys.GenMACCSKeys(mol)
    on_bits = dict.fromkeys(bit_vector.GetOnBits(), 1)
    return 166, on_bits, bit_vector
def CalculateEstateFingerprint(mol):
    """Compute the E-state fingerprint (79 bits).

    Input: mol is an RDKit molecule object.
    Output: a tuple (number of bits, dict of set positions mapped to 1,
    raw E-state value dict used for similarity calculations).

    Keys of the raw dict carry a 7-character prefix that is stripped
    when building the on-bit dict (as in the original implementation).
    """
    raw_values = EstateFingerprint(mol)
    on_bits = dict((key[7:], 1) for key in raw_values if raw_values[key] > 0)
    return 79, on_bits, raw_values
def CalculateAtomPairsFingerprint(mol):
    """Compute the atom-pairs fingerprint.

    Input: mol is an RDKit molecule object.
    Output: a tuple (fingerprint length, dict of nonzero elements,
    RDKit fingerprint object for similarity calculations).
    """
    fingerprint = Pairs.GetAtomPairFingerprint(mol)
    return (fingerprint.GetLength(),
            fingerprint.GetNonzeroElements(),
            fingerprint)
def CalculateTopologicalTorsionFingerprint(mol):
    """Compute the topological-torsion fingerprint.

    Input: mol is an RDKit molecule object.
    Output: a tuple (fingerprint length, dict of nonzero elements,
    RDKit fingerprint object for similarity calculations).
    """
    fingerprint = Torsions.GetTopologicalTorsionFingerprint(mol)
    return (fingerprint.GetLength(),
            fingerprint.GetNonzeroElements(),
            fingerprint)
def CalculateMorganFingerprint(mol, radius=2):
    """Compute the (sparse) Morgan circular fingerprint.

    Input: mol is an RDKit molecule object; radius is the circular
    neighborhood radius (default 2, i.e. ECFP4-equivalent).
    Output: a tuple (fingerprint length, dict of nonzero elements,
    RDKit fingerprint object for similarity calculations).
    """
    fingerprint = AllChem.GetMorganFingerprint(mol, radius)
    return (fingerprint.GetLength(),
            fingerprint.GetNonzeroElements(),
            fingerprint)
def CalculateECFP2Fingerprint(mol, radius=1):
    """Compute the ECFP2 fingerprint (Morgan, radius 1).

    Input: mol is an RDKit molecule object; radius defaults to 1.
    Output: a tuple (1024-long 0/1 bit tuple, dict of nonzero sparse
    elements, RDKit sparse fingerprint object).
    """
    sparse = AllChem.GetMorganFingerprint(mol, radius)
    dense_bits = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=1024)
    return tuple(dense_bits), sparse.GetNonzeroElements(), sparse
def CalculateECFP4Fingerprint(mol, radius=2):
    """Compute the ECFP4 fingerprint (Morgan, radius 2).

    Input: mol is an RDKit molecule object; radius defaults to 2.
    Output: a tuple (1024-long 0/1 bit tuple, dict of nonzero sparse
    elements, RDKit sparse fingerprint object).
    """
    sparse = AllChem.GetMorganFingerprint(mol, radius)
    dense_bits = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=1024)
    return tuple(dense_bits), sparse.GetNonzeroElements(), sparse
def CalculateECFP6Fingerprint(mol, radius=3):
    """Compute the ECFP6 fingerprint (Morgan, radius 3).

    Input: mol is an RDKit molecule object; radius defaults to 3.
    Output: a tuple (1024-long 0/1 bit tuple, dict of nonzero sparse
    elements, RDKit sparse fingerprint object).
    """
    sparse = AllChem.GetMorganFingerprint(mol, radius)
    dense_bits = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=1024)
    return tuple(dense_bits), sparse.GetNonzeroElements(), sparse
def CalculateSimilarityPybel(fp1, fp2):
    """Tanimoto similarity between two fingerprints in tuple form.

    Input: fp1 and fp2 are fingerprint tuples as returned by the
    Calculate*Fingerprint functions; their second element maps set-bit
    positions to 1.
    Output: the Tanimoto coefficient rounded to 3 decimals. Returns 0.0
    when both fingerprints are empty (the original raised
    ZeroDivisionError in that case).
    """
    bits1 = set(fp1[1].keys())
    bits2 = set(fp2[1].keys())
    union = bits1 | bits2
    if not union:
        # No bits set in either fingerprint: define similarity as 0.
        return 0.0
    intersection = bits1 & bits2
    tanimoto = len(intersection) / float(len(union))
    return round(tanimoto, 3)
def CalculateSimilarityRdkit(fp1, fp2, similarity="Tanimoto"):
    """Similarity between two RDKit bit vectors.

    Users can choose 11 different measures: Tanimoto, Dice, Cosine,
    Sokal, Russel, RogotGoldberg, AllBit, Kulczynski, McConnaughey,
    Asymmetric, BraunBlanquet. Unknown names fall back to the first
    registered measure (Tanimoto).

    Input: fp1 and fp2 are RDKit DataStructs bit vectors.
    Output: the similarity value rounded to 3 decimals.

    FIX: the original loop re-assigned the chosen function on *every*
    iteration (match -> selected, no match -> default), so a match was
    kept only if it happened to be the last entry scanned; any earlier
    match was silently overwritten by the default. Select the first
    match and stop.
    """
    available = DataStructs.similarityFunctions
    similarityfunction = available[0][1]  # default measure (Tanimoto)
    for entry in available:
        if similarity in entry[0]:
            similarityfunction = entry[1]
            break
    res = similarityfunction(fp1, fp2)
    return round(res, 3)
def CalculateFCFP2Fingerprint(mol, radius=1, nBits = 1024):
"""
#################################################################
Calculate FCFP2
Usage:
result=CalculateFCFP2Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
| |
0.25:
characteristic_density_local = characteristic_density
print('characteristic_density_local spotty {}'.format(characteristic_density_local))
else:
characteristic_density_local = float(np.sum(np.in1d(subpix, subpix_annulus_region))) \
/ (hp.nside2pixarea(nside_fracdet, degrees=True) * len(subpix_annulus_region)) # deg^-2
print('characteristic_density_local cleaned up {}'.format(characteristic_density_local))
else:
# Compute the local characteristic density
area_field = np.pi * (0.5**2 - 0.3**2)
n_field = np.sum((angsep_peak > 0.3) & (angsep_peak < 0.5))
characteristic_density_local = n_field / area_field
# If not good azimuthal coverage, revert
cut_annulus = (angsep_peak > 0.3) & (angsep_peak < 0.5)
#phi = np.degrees(np.arctan2(y_full[cut_annulus] - y_peak, x_full[cut_annulus] - x_peak)) # Use full magnitude range, NOT TESTED!!!
phi = np.degrees(np.arctan2(y[cut_annulus] - y_peak, x[cut_annulus] - x_peak)) # Impose magnitude threshold
h = np.histogram(phi, bins=np.linspace(-180., 180., 13))[0]
if np.sum(h > 0) < 10 or np.sum(h > 0.5 * np.median(h)) < 10:
#angsep_peak = np.sqrt((x - x_peak)**2 + (y - y_peak)**2)
characteristic_density_local = characteristic_density
print('Characteristic density local = {:0.1f} deg^-2 = {:0.3f} arcmin^-2'.format(characteristic_density_local, characteristic_density_local / 60.**2))
return characteristic_density_local
########################################################################
def find_peaks(nside, data, characteristic_density, distance_modulus, pix_nside_select, ra_select, dec_select, magnitude_threshold=mag_max, fracdet=None):
    """
    Convolve field to find characteristic density and peaks within the selected pixel.

    Histograms the (projected) stellar positions on a fine grid, smooths
    with a Gaussian, and labels over-density regions inside the selected
    healpix pixel.

    Parameters are assumed as follows (NOTE(review): confirm against caller):
    nside/pix_nside_select select the healpix pixel being searched;
    data is a catalog recarray with columns basis_1/basis_2 (sky coords)
    and mag_dered_1; characteristic_density is the field density in deg^-2;
    distance_modulus and fracdet are currently unused here.

    Returns three parallel lists over detected peaks: x, y (projected deg)
    and per-star angular separations from each peak.
    """
    # convolve field and find peaks
    # Keep only stars brighter than the magnitude threshold.
    cut_magnitude_threshold = (data[mag_dered_1] < magnitude_threshold)
    proj = ugali.utils.projector.Projector(ra_select, dec_select)
    x, y = proj.sphereToImage(data[basis_1][cut_magnitude_threshold], data[basis_2][cut_magnitude_threshold]) # Trimmed magnitude range for hotspot finding
    #x_full, y_full = proj.sphereToImage(data[basis_1], data[basis_2]) # If we want to use full magnitude range for significance evaluation
    delta_x = 0.01          # grid cell size (deg)
    area = delta_x**2       # grid cell area (deg^2)
    smoothing = 2. / 60. # Was 3 arcmin
    bins = np.arange(-8., 8. + 1.e-10, delta_x)
    centers = 0.5 * (bins[0: -1] + bins[1:])
    yy, xx = np.meshgrid(centers, centers)
    # 2-D star-count histogram, then Gaussian smoothing in pixel units.
    h = np.histogram2d(x, y, bins=[bins, bins])[0]
    h_g = scipy.ndimage.filters.gaussian_filter(h, smoothing / delta_x)
    factor_array = np.arange(1., 5., 0.05)
    rara, decdec = proj.imageToSphere(xx.flatten(), yy.flatten())
    # Mask of grid cells that fall inside the selected healpix pixel.
    cutcut = (ugali.utils.healpix.angToPix(nside, rara, decdec) == pix_nside_select).reshape(xx.shape)
    threshold_density = 5 * characteristic_density * area
    # Raise the detection threshold until fewer than 10 connected
    # over-density regions remain.
    for factor in factor_array:
        h_region, n_region = scipy.ndimage.measurements.label((h_g * cutcut) > (area * characteristic_density * factor))
        #print 'factor', factor, n_region, n_region < 10
        if n_region < 10:
            threshold_density = area * characteristic_density * factor
            break
    h_region, n_region = scipy.ndimage.measurements.label((h_g * cutcut) > threshold_density)
    h_region = np.ma.array(h_region, mask=(h_region < 1))
    x_peak_array = []
    y_peak_array = []
    angsep_peak_array = []
    for index in range(1, n_region + 1): # loop over peaks
        # Peak location = maximum of the smoothed histogram within the region.
        index_peak = np.argmax(h_g * (h_region == index))
        x_peak, y_peak = xx.flatten()[index_peak], yy.flatten()[index_peak]
        #print index, np.max(h_g * (h_region == index))
        #angsep_peak = np.sqrt((x_full - x_peak)**2 + (y_full - y_peak)**2) # Use full magnitude range, NOT TESTED!!!
        angsep_peak = np.sqrt((x - x_peak)**2 + (y - y_peak)**2) # Impose magnitude threshold
        x_peak_array.append(x_peak)
        y_peak_array.append(y_peak)
        angsep_peak_array.append(angsep_peak)
    return x_peak_array, y_peak_array, angsep_peak_array
########################################################################
def fit_aperture(proj, distance_modulus, characteristic_density_local, x_peak, y_peak, angsep_peak):
    """
    Fit the aperture for one hotspot by scanning radii and maximizing the
    Poisson significance of the enclosed star counts over the local field.

    :param proj: projector exposing imageToSphere(x, y) -> (ra, dec)
    :param distance_modulus: distance modulus of the current scan (passed through)
    :param characteristic_density_local: local field density (deg^-2)
    :param x_peak: projected x position of the hotspot (deg)
    :param y_peak: projected y position of the hotspot (deg)
    :param angsep_peak: angular separations of the selected stars from the peak (deg)
    :return: eight single-element lists: ra, dec, best radius, significance,
             distance modulus, n_obs, n_obs within r/2, and n_model
    """
    # Scan aperture radii from 0.01 to 0.29 deg in 0.01 deg steps
    radii = np.arange(0.01, 0.3, 0.01)
    sig_of_radius = np.tile(0., len(radii))
    obs_of_radius = np.tile(0, len(radii))
    model_of_radius = np.tile(0., len(radii))
    for idx, radius in enumerate(radii):
        observed = np.sum(angsep_peak < radius)
        expected = characteristic_density_local * (np.pi * radius**2)
        # Poisson survival probability converted to a Gaussian sigma; clipped
        # at 37.5 where the inverse survival function loses precision
        sig_of_radius[idx] = np.clip(scipy.stats.norm.isf(scipy.stats.poisson.sf(observed, expected)), 0., 37.5)
        obs_of_radius[idx] = observed
        model_of_radius[idx] = expected

    ra_peak, dec_peak = proj.imageToSphere(x_peak, y_peak)

    # Best aperture = radius with the highest significance
    best = np.argmax(sig_of_radius)
    r_peak = radii[best]
    #if np.max(sig_of_radius) >= 37.5:
    #    r_peak = 0.5
    n_obs_peak = obs_of_radius[best]
    n_model_peak = model_of_radius[best]
    n_obs_half_peak = np.sum(angsep_peak < (0.5 * r_peak))

    print('Candidate: x_peak: {:12.3f}, y_peak: {:12.3f}, r_peak: {:12.3f}, sig: {:12.3f}, ra_peak: {:12.3f}, dec_peak: {:12.3f}'.format(x_peak, y_peak, r_peak, np.max(sig_of_radius), ra_peak, dec_peak))

    # Return single-element lists so callers can concatenate across peaks
    return ([ra_peak], [dec_peak], [r_peak], [sig_of_radius[best]],
            [distance_modulus], [n_obs_peak], [n_obs_half_peak], [n_model_peak])
########################################################################
# mode = 0
def search_by_distance(nside, data, distance_modulus, pix_nside_select, ra_select, dec_select, magnitude_threshold=mag_max, fracdet=None):
    """
    Idea:
    Send a data extension that goes to faint magnitudes, e.g., g < 24.
    Use the whole region to identify hotspots using a slightly brighter
    magnitude threshold, e.g., g < 23, so not susceptible to variations
    in depth. Then compute the local field density using a small annulus
    around each individual hotspot, e.g., radius 0.3 to 0.5 deg.

    fracdet corresponds to a fracdet map (numpy array, assumed to be EQUATORIAL and RING)

    :return: eight parallel arrays (ra, dec, r, sig, distance modulus, n_obs,
             n_obs within r/2, n_model), or eight empty lists if nothing
             survives the cuts or no hotspot is found
    """
    # Fix: '...'.format(...) must be evaluated inside print(); the previous
    # print('...').format(...) called .format on None under Python 3
    # (matching the correct pattern used in search_by_simulation's count prints).
    print('Distance = {:0.1f} kpc (m-M = {:0.1f})'.format(ugali.utils.projector.distanceModulusToDistance(distance_modulus), distance_modulus))

    # Keep only stars consistent with an old, metal-poor isochrone at this distance
    iso = ugali.isochrone.factory(name=isoname, survey=isosurvey, band_1=band_1.lower(), band_2=band_2.lower())
    iso.age = 12.
    iso.metallicity = 0.0001
    iso.distance_modulus = distance_modulus
    cut = cut_isochrone_path(data[mag_dered_1], data[mag_dered_2], data[mag_err_1], data[mag_err_2], iso, radius=0.1)
    data = data[cut]
    print('{} objects left after isochrone cut...'.format(len(data)))

    if (len(data) == 0):
        return [], [], [], [], [], [], [], []

    # Compute characteristic density at this distance
    characteristic_density = compute_char_density(nside, data, ra_select, dec_select, mag_max, fracdet)

    proj = ugali.utils.projector.Projector(ra_select, dec_select)
    x_peak_array, y_peak_array, angsep_peak_array = find_peaks(nside, data, characteristic_density, distance_modulus, pix_nside_select, ra_select, dec_select, magnitude_threshold, fracdet)

    # Guard: np.concatenate() raises ValueError on an empty sequence, so bail
    # out with empty results when find_peaks located no hotspot candidates.
    if len(x_peak_array) == 0:
        return [], [], [], [], [], [], [], []

    ra_peak_array = []
    dec_peak_array = []
    r_peak_array = []
    sig_peak_array = []
    distance_modulus_array = []
    n_obs_peak_array = []
    n_obs_half_peak_array = []
    n_model_peak_array = []
    # Fix: itertools.izip is Python 2 only; the builtin zip is equivalent here.
    for x_peak, y_peak, angsep_peak in zip(x_peak_array, y_peak_array, angsep_peak_array):
        characteristic_density_local = compute_local_char_density(nside, data, characteristic_density, ra_select, dec_select, x_peak, y_peak, angsep_peak, mag_max, fracdet)

        # Aperture fitting
        print('Fitting aperture to hotspot...')
        ra_peaks, dec_peaks, r_peaks, sig_peaks, distance_moduli, n_obs_peaks, n_obs_half_peaks, n_model_peaks = fit_aperture(proj, distance_modulus, characteristic_density_local, x_peak, y_peak, angsep_peak)

        ra_peak_array.append(ra_peaks)
        dec_peak_array.append(dec_peaks)
        r_peak_array.append(r_peaks)
        sig_peak_array.append(sig_peaks)
        distance_modulus_array.append(distance_moduli)
        n_obs_peak_array.append(n_obs_peaks)
        n_obs_half_peak_array.append(n_obs_half_peaks)
        n_model_peak_array.append(n_model_peaks)

    # Flatten the per-peak result lists into single arrays
    ra_peak_array = np.concatenate(ra_peak_array)
    dec_peak_array = np.concatenate(dec_peak_array)
    r_peak_array = np.concatenate(r_peak_array)
    sig_peak_array = np.concatenate(sig_peak_array)
    distance_modulus_array = np.concatenate(distance_modulus_array)
    n_obs_peak_array = np.concatenate(n_obs_peak_array)
    n_obs_half_peak_array = np.concatenate(n_obs_half_peak_array)
    n_model_peak_array = np.concatenate(n_model_peak_array)

    return ra_peak_array, dec_peak_array, r_peak_array, sig_peak_array, distance_modulus_array, n_obs_peak_array, n_obs_half_peak_array, n_model_peak_array
########################################################################
# mode = 1
def search_by_simulation(nside, data, distance_modulus, pix_nside_select, ra_select, dec_select, magnitude_threshold=mag_max, fracdet=None):
    """
    Idea:
    Send a data extension that goes to faint magnitudes, e.g., g < 24.
    Use the whole region to identify hotspots using a slightly brighter
    magnitude threshold, e.g., g < 23, so not susceptible to variations
    in depth. Then compute the local field density using a small annulus
    around each individual hotspot, e.g., radius 0.3 to 0.5 deg.

    fracdet corresponds to a fracdet map (numpy array, assumed to be EQUATORIAL and RING)

    Unlike search_by_distance, this fits a single aperture centered on the
    selection coordinates (where the simulated satellite was injected).

    :return: eight parallel arrays (ra, dec, r, sig, distance modulus, n_obs,
             n_obs within r/2, n_model), or eight empty lists if no objects
             survive the isochrone cut
    """
    # Fix: '...'.format(...) must be evaluated inside print(); the previous
    # print('...').format(...) called .format on None under Python 3
    # (matching the correct pattern used for the count prints below).
    print('Distance = {:0.1f} kpc (m-M = {:0.1f})'.format(ugali.utils.projector.distanceModulusToDistance(distance_modulus), distance_modulus))

    # Keep only stars consistent with an old, metal-poor isochrone at this distance
    iso = ugali.isochrone.factory(name=isoname, survey=isosurvey, band_1=band_1.lower(), band_2=band_2.lower())
    iso.age = 12.
    iso.metallicity = 0.0001
    iso.distance_modulus = distance_modulus
    cut = cut_isochrone_path(data[mag_dered_1], data[mag_dered_2], data[mag_err_1], data[mag_err_2], iso, radius=0.1)
    data = data[cut]
    print('{} objects left after isochrone cut...'.format(len(data)))
    print('{} simulated objects left after isochrone cut...'.format(np.sum(data['MC_SOURCE_ID'] != 0)))

    if (len(data) == 0):
        return [], [], [], [], [], [], [], []

    # Compute characteristic density at this distance
    characteristic_density = compute_char_density(nside, data, ra_select, dec_select, mag_max, fracdet)

    ra_peak_array = []
    dec_peak_array = []
    r_peak_array = []
    sig_peak_array = []
    distance_modulus_array = []
    n_obs_peak_array = []
    n_obs_half_peak_array = []
    n_model_peak_array = []

    proj = ugali.utils.projector.Projector(ra_select, dec_select)
    cut_magnitude_threshold = (data[mag_dered_1] < magnitude_threshold)
    # The simulated satellite is injected at the selection center, so the
    # "peak" is the projection origin rather than a detected hotspot.
    x_peak, y_peak = proj.sphereToImage(ra_select, dec_select) # = 0, 0
    x, y = proj.sphereToImage(data[basis_1][cut_magnitude_threshold], data[basis_2][cut_magnitude_threshold])
    angsep_peak = np.sqrt((x - x_peak)**2 + (y - y_peak)**2)

    characteristic_density_local = compute_local_char_density(nside, data, characteristic_density, ra_select, dec_select, x_peak, y_peak, angsep_peak, mag_max, fracdet)

    # Aperture fitting
    print('Fitting aperture to hotspot...')
    ra_peaks, dec_peaks, r_peaks, sig_peaks, distance_moduli, n_obs_peaks, n_obs_half_peaks, n_model_peaks = fit_aperture(proj, distance_modulus, characteristic_density_local, x_peak, y_peak, angsep_peak)

    ra_peak_array.append(ra_peaks)
    dec_peak_array.append(dec_peaks)
    r_peak_array.append(r_peaks)
    sig_peak_array.append(sig_peaks)
    distance_modulus_array.append(distance_moduli)
    n_obs_peak_array.append(n_obs_peaks)
    n_obs_half_peak_array.append(n_obs_half_peaks)
    n_model_peak_array.append(n_model_peaks)

    # Flatten the (single-entry) result lists into arrays for a return type
    # consistent with search_by_distance
    ra_peak_array = np.concatenate(ra_peak_array)
    dec_peak_array = np.concatenate(dec_peak_array)
    r_peak_array = np.concatenate(r_peak_array)
    sig_peak_array = np.concatenate(sig_peak_array)
    distance_modulus_array = np.concatenate(distance_modulus_array)
    n_obs_peak_array = np.concatenate(n_obs_peak_array)
    n_obs_half_peak_array = np.concatenate(n_obs_half_peak_array)
    n_model_peak_array = np.concatenate(n_model_peak_array)

    return ra_peak_array, dec_peak_array, r_peak_array, sig_peak_array, distance_modulus_array, n_obs_peak_array, n_obs_half_peak_array, n_model_peak_array
########################################################################
# mode = 2
def search_by_object(nside, data, distance_modulus, pix_nside_select, ra_select, dec_select, magnitude_threshold=mag_max, fracdet=None):
"""
Idea:
Send a data extension that goes to faint magnitudes, e.g., g < 24.
Use the whole region to identify hotspots using a slightly brighter
magnitude threshold, e.g., g < 23, so not susceptible to variations
in depth. Then compute the local field density using a small annulus
around each individual hotspot, e.g., radius 0.3 to 0.5 deg.
fracdet corresponds to a fracdet map (numpy array, assumed to be EQUATORIAL and RING)
"""
print('Distance = {:0.1f} kpc (m-M = {:0.1f})').format(ugali.utils.projector.distanceModulusToDistance(distance_modulus), distance_modulus)
iso = ugali.isochrone.factory(name=isoname, survey=isosurvey, band_1=band_1.lower(), band_2=band_2.lower())
iso.age = 12.
iso.metallicity = 0.0001
iso.distance_modulus = distance_modulus
cut = cut_isochrone_path(data[mag_dered_1], data[mag_dered_2], data[mag_err_1], data[mag_err_2], iso, | |
None): # noqa: E501
raise ApiValueError("Missing the required parameter `ip` when calling `api_v1_object_device_ip_port_port_active_nodes_get`") # noqa: E501
# verify the required parameter 'port' is set
if self.api_client.client_side_validation and ('port' not in local_var_params or # noqa: E501
local_var_params['port'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `port` when calling `api_v1_object_device_ip_port_port_active_nodes_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ip' in local_var_params:
path_params['ip'] = local_var_params['ip'] # noqa: E501
if 'port' in local_var_params:
path_params['port'] = local_var_params['port'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/api/v1/object/device/{ip}/port/{port}/active_nodes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def api_v1_object_device_ip_port_port_active_nodes_with_age_get(self, ip, port, **kwargs):  # noqa: E501
    """Fetch active_nodes_with_age rows for a given port.  # noqa: E501

    Synchronous by default; pass async_req=True to get back a request
    thread whose .get() yields the result.

    >>> thread = api.api_v1_object_device_ip_port_port_active_nodes_with_age_get(ip, port, async_req=True)
    >>> result = thread.get()

    :param ip: Canonical IP of the Device. Use Search methods to find this. (required)
    :type ip: str
    :param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
    :type port: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Delegate to the *_with_http_info variant, forcing data-only return
    # (status code and headers are stripped from the response).
    return self.api_v1_object_device_ip_port_port_active_nodes_with_age_get_with_http_info(
        ip, port, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def api_v1_object_device_ip_port_port_active_nodes_with_age_get_with_http_info(self, ip, port, **kwargs):  # noqa: E501
    """api_v1_object_device_ip_port_port_active_nodes_with_age_get  # noqa: E501

    Returns active_nodes_with_age rows for a given port  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_v1_object_device_ip_port_port_active_nodes_with_age_get_with_http_info(ip, port, async_req=True)
    >>> result = thread.get()

    :param ip: Canonical IP of the Device. Use Search methods to find this. (required)
    :type ip: str
    :param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
    :type port: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Snapshot the call arguments (self, ip, port, kwargs) before any other
    # locals exist; the generated validation below reads from this dict.
    local_var_params = locals()

    all_params = [
        'ip',
        'port'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params so they can be read uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_v1_object_device_ip_port_port_active_nodes_with_age_get" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'ip' is set
    if self.api_client.client_side_validation and ('ip' not in local_var_params or  # noqa: E501
                                                   local_var_params['ip'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `ip` when calling `api_v1_object_device_ip_port_port_active_nodes_with_age_get`")  # noqa: E501
    # verify the required parameter 'port' is set
    if self.api_client.client_side_validation and ('port' not in local_var_params or  # noqa: E501
                                                   local_var_params['port'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `port` when calling `api_v1_object_device_ip_port_port_active_nodes_with_age_get`")  # noqa: E501

    collection_formats = {}

    # Path parameters substituted into the {ip}/{port} URL template below
    path_params = {}
    if 'ip' in local_var_params:
        path_params['ip'] = local_var_params['ip']  # noqa: E501
    if 'port' in local_var_params:
        path_params['port'] = local_var_params['port']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # Authentication setting
    auth_settings = ['APIKeyHeader']  # noqa: E501

    # Empty map: the response body is not deserialized into a model class
    response_types_map = {}

    return self.api_client.call_api(
        '/api/v1/object/device/{ip}/port/{port}/active_nodes_with_age', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def api_v1_object_device_ip_port_port_agg_master_get(self, ip, port, **kwargs):  # noqa: E501
    """Fetch the related agg_master table entry for a given port.  # noqa: E501

    Synchronous by default; pass async_req=True to get back a request
    thread whose .get() yields the result.

    >>> thread = api.api_v1_object_device_ip_port_port_agg_master_get(ip, port, async_req=True)
    >>> result = thread.get()

    :param ip: Canonical IP of the Device. Use Search methods to find this. (required)
    :type ip: str
    :param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
    :type port: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Delegate to the *_with_http_info variant, forcing data-only return
    # (status code and headers are stripped from the response).
    return self.api_v1_object_device_ip_port_port_agg_master_get_with_http_info(
        ip, port, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def api_v1_object_device_ip_port_port_agg_master_get_with_http_info(self, ip, port, **kwargs): # noqa: E501
"""api_v1_object_device_ip_port_port_agg_master_get # noqa: E501
Returns the related agg_master table entry for a given port # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_object_device_ip_port_port_agg_master_get_with_http_info(ip, port, async_req=True)
>>> result = thread.get()
:param ip: Canonical IP of the Device. Use Search methods to find this. (required)
:type ip: str
:param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
:type port: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'ip',
'port'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_object_device_ip_port_port_agg_master_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'ip' is set
if self.api_client.client_side_validation and ('ip' not in local_var_params or # noqa: E501
local_var_params['ip'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `ip` when calling `api_v1_object_device_ip_port_port_agg_master_get`") # noqa: E501
# verify the required parameter 'port' is set
if self.api_client.client_side_validation and ('port' not in local_var_params or # noqa: E501
local_var_params['port'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `port` when calling `api_v1_object_device_ip_port_port_agg_master_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ip' in local_var_params:
path_params['ip'] = local_var_params['ip'] # noqa: E501
if 'port' in local_var_params:
path_params['port'] = local_var_params['port'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/api/v1/object/device/{ip}/port/{port}/agg_master', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # | |
indicatorOfParameter == 184 and indicatorOfTypeOfLevel == 202 and level == 6:
return 'DISC_SIG_CODE'
if table2Version == 203 and indicatorOfParameter == 184 and indicatorOfTypeOfLevel == 202 and level == 5:
return 'DISC_VERT_CODE'
if table2Version == 203 and indicatorOfParameter == 184 and indicatorOfTypeOfLevel == 202 and level == 4:
return 'DISC_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 184 and indicatorOfTypeOfLevel == 202 and level == 3:
return 'DISC_SIG_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 184 and indicatorOfTypeOfLevel == 202 and level == 2:
return 'DISC_SIG_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 184 and indicatorOfTypeOfLevel == 202 and level == 1:
return 'DISC_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 183 and indicatorOfTypeOfLevel == 202 and level == 6:
return 'DIDC_MAX_CODE'
if table2Version == 203 and indicatorOfParameter == 183 and indicatorOfTypeOfLevel == 202 and level == 5:
return 'DIDC_VERT_CODE'
if table2Version == 203 and indicatorOfParameter == 183 and indicatorOfTypeOfLevel == 202 and level == 4:
return 'DIDC_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 183 and indicatorOfTypeOfLevel == 202 and level == 3:
return 'DIDC_MAX_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 183 and indicatorOfTypeOfLevel == 202 and level == 2:
return 'DIDC_MAX_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 183 and indicatorOfTypeOfLevel == 202 and level == 1:
return 'DIDC_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 182 and indicatorOfTypeOfLevel == 202 and level == 6:
return 'PISC_SIG_CODE'
if table2Version == 203 and indicatorOfParameter == 182 and indicatorOfTypeOfLevel == 202 and level == 5:
return 'PISC_VERT_CODE'
if table2Version == 203 and indicatorOfParameter == 182 and indicatorOfTypeOfLevel == 202 and level == 4:
return 'PISC_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 182 and indicatorOfTypeOfLevel == 202 and level == 3:
return 'PISC_SIG_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 182 and indicatorOfTypeOfLevel == 202 and level == 2:
return 'PISC_SIG_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 182 and indicatorOfTypeOfLevel == 202 and level == 1:
return 'PISC_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 181 and indicatorOfTypeOfLevel == 202 and level == 6:
return 'PIDC_MAX_CODE'
if table2Version == 203 and indicatorOfParameter == 181 and indicatorOfTypeOfLevel == 202 and level == 5:
return 'PIDC_VERT_CODE'
if table2Version == 203 and indicatorOfParameter == 181 and indicatorOfTypeOfLevel == 202 and level == 4:
return 'PIDC_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 181 and indicatorOfTypeOfLevel == 202 and level == 3:
return 'PIDC_MAX_TOP_HFT'
if table2Version == 203 and indicatorOfParameter == 181 and indicatorOfTypeOfLevel == 202 and level == 2:
return 'PIDC_MAX_BASE_HFT'
if table2Version == 203 and indicatorOfParameter == 181 and indicatorOfTypeOfLevel == 202 and level == 1:
return 'PIDC_BASE_HFT'
if table2Version == 201 and indicatorOfParameter == 94:
return 'H_B1_LK'
if table2Version == 201 and indicatorOfParameter == 192:
return 'T_B1_LK'
if table2Version == 201 and indicatorOfParameter == 91:
return 'C_T_LK'
if table2Version == 201 and indicatorOfParameter == 95:
return 'H_ML_LK'
if table2Version == 201 and indicatorOfParameter == 191:
return 'T_BOT_LK'
if table2Version == 201 and indicatorOfParameter == 193:
return 'T_WML_LK'
if table2Version == 201 and indicatorOfParameter == 194:
return 'T_MNW_LK'
if table2Version == 201 and indicatorOfParameter == 190:
return 'T_BS_LK'
if table2Version == 201 and indicatorOfParameter == 93:
return 'DP_BS_LK'
if table2Version == 201 and indicatorOfParameter == 92:
return 'GAMSO_LK'
if table2Version == 201 and indicatorOfParameter == 97:
return 'FETCH_LK'
if table2Version == 201 and indicatorOfParameter == 96:
return 'DEPTH_LK'
if table2Version == 202 and indicatorOfParameter == 55:
return 'FR_LAKE'
if table2Version == 201 and indicatorOfParameter == 24 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'ASWDIFU_S'
if table2Version == 201 and indicatorOfParameter == 23 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'ASWDIFD_S'
if table2Version == 201 and indicatorOfParameter == 22 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'ASWDIR_S'
if table2Version == 201 and indicatorOfParameter == 42 and timeRangeIndicator == 0:
return 'TDIV_HUM'
if table2Version == 201 and indicatorOfParameter == 24 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 3:
return 'ASWDIFU_S'
if table2Version == 201 and indicatorOfParameter == 23 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 3:
return 'ASWDIFD_S'
if table2Version == 201 and indicatorOfParameter == 22 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 3:
return 'ASWDIR_S'
if table2Version == 203 and indicatorOfParameter == 58:
return 'CLO'
if table2Version == 203 and indicatorOfParameter == 57:
return 'SUL_PROB'
if table2Version == 2 and indicatorOfParameter == 41:
return 'ABSV'
if table2Version == 203 and indicatorOfParameter == 61:
return 'T_SEA_C'
if table2Version == 2 and indicatorOfParameter == 80:
return 'T_SEA'
if table2Version == 203 and indicatorOfParameter == 60:
return 'PT1M'
if table2Version == 203 and indicatorOfParameter == 93:
return 'C_TYPE'
if table2Version == 202 and indicatorOfParameter == 249:
return 'UVI_MAX_H'
if table2Version == 202 and indicatorOfParameter == 247:
return 'TOT_O3'
if table2Version == 202 and indicatorOfParameter == 243:
return 'UVI_MAX_CS'
if table2Version == 202 and indicatorOfParameter == 242:
return 'UVI_CL_COR'
if table2Version == 202 and indicatorOfParameter == 241:
return 'UVI_B_CS'
if table2Version == 202 and indicatorOfParameter == 240:
return 'UVI_CS_COR'
if table2Version == 208 and indicatorOfParameter == 236:
return 'W_FRSTR_06'
if table2Version == 208 and indicatorOfParameter == 232:
return 'W_FR_01'
if table2Version == 208 and indicatorOfParameter == 213:
return 'U_SVWSK_12'
if table2Version == 208 and indicatorOfParameter == 212:
return 'W_SVW_12'
if table2Version == 208 and indicatorOfParameter == 199:
return 'U_GEWSW_01'
if table2Version == 208 and indicatorOfParameter == 198:
return 'W_GEWSK_01'
if table2Version == 208 and indicatorOfParameter == 197:
return 'W_GEW_01'
if table2Version == 208 and indicatorOfParameter == 191:
return 'W_GLEIS_01'
if table2Version == 208 and indicatorOfParameter == 139:
return 'E_ORK_01'
if table2Version == 208 and indicatorOfParameter == 138:
return 'U_ORK_01'
if table2Version == 208 and indicatorOfParameter == 137:
return 'U_ORKAR_01'
if table2Version == 208 and indicatorOfParameter == 136:
return 'W_STMSW_01'
if table2Version == 208 and indicatorOfParameter == 134:
return 'W_STM_01'
if table2Version == 208 and indicatorOfParameter == 132:
return 'W_WND_01'
if table2Version == 208 and indicatorOfParameter == 77:
return 'E_SF_12'
if table2Version == 208 and indicatorOfParameter == 75:
return 'U_SFSK_12'
if table2Version == 208 and indicatorOfParameter == 74:
return 'W_SF_12'
if table2Version == 208 and indicatorOfParameter == 72:
return 'W_SFL_12'
if table2Version == 208 and indicatorOfParameter == 71:
return 'U_SFSK_06'
if table2Version == 208 and indicatorOfParameter == 70:
return 'W_SF_06'
if table2Version == 208 and indicatorOfParameter == 69:
return 'W_SFL_06'
if table2Version == 208 and indicatorOfParameter == 32:
return 'E_DR_12'
if table2Version == 208 and indicatorOfParameter == 29:
return 'U_DRRER_12'
if table2Version == 208 and indicatorOfParameter == 26:
return 'W_DRR_12'
if table2Version == 208 and indicatorOfParameter == 17:
return 'U_SKRRH_06'
if table2Version == 208 and indicatorOfParameter == 14:
return 'W_SKRR_06'
if table2Version == 208 and indicatorOfParameter == 3:
return 'U_SKRRH_01'
if table2Version == 208 and indicatorOfParameter == 1:
return 'W_SKRR_01'
if table2Version == 201 and indicatorOfParameter == 132 and timeRangeIndicator == 0:
return 'GRAU_GSP'
if table2Version == 201 and indicatorOfParameter == 113 and timeRangeIndicator == 1:
return 'RAIN_CON'
if table2Version == 201 and indicatorOfParameter == 102 and timeRangeIndicator == 1:
return 'RAIN_GSP'
if table2Version == 201 and indicatorOfParameter == 5 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'APAB_S'
if table2Version == 2 and indicatorOfParameter == 125 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'AVMFL_S'
if table2Version == 2 and indicatorOfParameter == 124 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'AUMFL_S'
if table2Version == 2 and indicatorOfParameter == 122 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'ASHFL_S'
if table2Version == 2 and indicatorOfParameter == 121 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return 'ALHFL_S'
if table2Version == 2 and indicatorOfParameter == 61 and timeRangeIndicator == 1:
return 'TOT_PREC'
if table2Version == 2 and indicatorOfParameter == 78 and timeRangeIndicator == 1:
return 'SNOW_CON'
if table2Version == 2 and indicatorOfParameter == 79 and timeRangeIndicator == 1:
return 'SNOW_GSP'
if table2Version == 2 and indicatorOfParameter == 112 and indicatorOfTypeOfLevel == 1 and timeRangeIndicator == 1:
return | |
import random
from collections import deque
from random import shuffle, randint
import gui.constants as c
# Uncomment to replicate random _maze generations
# random.seed(0)
class MazeBuilder:
def __init__(self):
"""
Create a new MazeBuilder instance, which handles maze initialization and random generation.
"""
self._maze = []
self._start_pos = (0, 0)
self._end_pos = (0, 0)
self._start_idx = 0
self._end_idx = 0
self._size = 0
self._box_width = c.WIDTH // c.BOX_SIZE
self._box_height = c.HEIGHT // c.BOX_SIZE
self.initialize_maze()
def get_endpoints(self):
"""
Get the start and end coordinates of the maze.
:return: Tuple on the form ((sx, sy), (ex, ey))
"""
return tuple([self._start_pos, self._end_pos])
def initialize_maze(self):
"""
Creates a 2d list where each element in the list contains its x and y position, along with its current color
code which signifies what color it should be. => Each element in the list is on the form [x, y, color].
:return: None
"""
# instantiate the maze in a 2D list, where each element contains x_pos, y_pos and color_code
self._maze = [[x + c.MAZE_LOC[0], y + c.MAZE_LOC[1], 0] for y in range(0, c.HEIGHT, c.BOX_SIZE) for x in
range(0, c.WIDTH, c.BOX_SIZE)]
self._size = len(self._maze)
half_len = self._size // 2
# compute the start and end index of the maze
self._start_idx = half_len if self._box_height % 2 == 0 else half_len - self._box_width // 2
self._end_idx = self._start_idx + self._box_width - 1
self._maze[self._start_idx][2] = -1
self._maze[self._end_idx][2] = -2
# compute the start and end position
self._start_pos = (self._maze[self._start_idx][0], self._maze[self._start_idx][1])
self._end_pos = (self._maze[self._end_idx][0], self._maze[self._end_idx][1])
    def get_maze(self):
        """
        Get the active maze list.

        :return: the maze as a flat list of [x, y, color] entries (the live
            list itself, not a copy — mutations are visible to this builder)
        """
        return self._maze
def get_unvisited_neighbours(self, i, visited):
"""
Get all the neighbours of a tile.
:param i: index of the current tile
:param visited: visited array
:return: list of unvisited neighbours, where each element contains [index, direction]
"""
neighbours = []
if i - 1 >= 0 and i % self._box_width != 0 and not visited[i - 1]: # west
neighbours.append([i - 1, -2])
if i + 1 < self._size and (i + 1) % self._box_width != 0 and not visited[i + 1]: # east
neighbours.append([i + 1, -1])
if i - self._box_width >= 0 and not visited[i - self._box_width]: # north
neighbours.append([i - self._box_width, 1])
if i + self._box_width < self._size and not visited[i + self._box_width]: # south
neighbours.append([i + self._box_width, 2])
return neighbours
    def process_neighbour(self, ci, ni, maze, visited, stack):
        """
        Check if the given neighbour is unvisited, if so it is added to the stack. Furthermore, a check is performed to
        ensure the new tile does not create a 2x2 square of white tiles, in which case we do not want to add
        the neighbour to the stack, but instead mark it as visited and replace it with a wall.

        The direction of travel is inferred from the index difference
        ``ci - ni``: +1 means west, -1 means east, a positive multiple of the
        row width means north, a negative one means south.

        :param ci: current index of the tile
        :param ni: new index to check whether it is a valid neighbour or not
        :param maze: flat list of colour codes (values <= 0 are open/special
            tiles, 1 is a wall)
        :param visited: visited list (booleans, indexed like ``maze``)
        :param stack: dfs stack
        :return: None
        """
        if ci - ni == 1:  # Going west
            # If opening ni would complete a 2x2 white square with the row
            # below (ni, ci and both tiles directly beneath them open),
            # mark it as visited and turn it into a wall instead.
            if ci + self._box_width < self._size and maze[ni + self._box_width] <= 0 \
                    and maze[ci + self._box_width] <= 0:
                visited[ni] = True
                maze[ni] = 1
            # Same check against the row above.
            elif ci - self._box_width >= 0 and maze[ni - self._box_width] <= 0 and maze[ci - self._box_width] <= 0:
                visited[ni] = True
                maze[ni] = 1
            # No 2x2 square will be created by the new tile, add it to the stack
            else:
                stack.append(ni)
        # Same logic as above applies, mirrored for the other directions.
        elif ci - ni == -1:  # Going east
            if ni + self._box_width < self._size and maze[ni + self._box_width] <= 0 \
                    and maze[ci + self._box_width] <= 0:
                visited[ni] = True
                maze[ni] = 1
            elif ni - self._box_width >= 0 and maze[ni - self._box_width] <= 0 and maze[ci - self._box_width] <= 0:
                visited[ni] = True
                maze[ni] = 1
            else:
                stack.append(ni)
        elif ci - ni > 0:  # Going north
            # NOTE(review): the +-1 neighbour checks here do not guard against
            # wrapping across a row boundary, so tiles in the first/last
            # column may consult a tile from the adjacent row — presumably
            # tolerated by the generator; TODO confirm.
            if ni - 1 >= 0 and maze[ni - 1] <= 0 and maze[ci - 1] <= 0:
                visited[ni] = True
                maze[ni] = 1
            elif ci + 1 < self._size and maze[ni + 1] <= 0 and maze[ci + 1] <= 0:
                visited[ni] = True
                maze[ni] = 1
            else:
                stack.append(ni)
        else:  # Going south
            if ni + 1 < self._size and maze[ni + 1] <= 0 and maze[ci + 1] <= 0:
                visited[ni] = True
                maze[ni] = 1
            elif ci - 1 >= 0 and maze[ni - 1] <= 0 and maze[ci - 1] <= 0:
                visited[ni] = True
                maze[ni] = 1
            else:
                stack.append(ni)
def _backtrack_visitors(self, i, maze, visited):
"""
When we create a new white, open tile, we check what direction we most likely came from, and mark
its neighbours as visited. Consider the example below, where '*' is a black wall, o is a white path
tile, and i is the current tile we are at. In this case we want to mark the tiles b1 and b3 as visited,
because we want the walls to stay there. This is to generate a more 'organic' maze where paths in the _maze
are properly separated by walls.
a b c d
1|*|*|*|*|\n
2|o|o|i|*|\n
3|*|*|*|*|\n
4|*|*|*|*|\n
:param i: index of current white tile.
:param maze: maze list
:param visited: visited list
:return: None
"""
# check west
wi = i - 1
if wi >= 0 and wi % self._box_width != 0 and maze[wi] <= 0:
vis = False
if wi - self._box_width >= 0:
visited[wi - self._box_width] = True
vis = True
if wi + self._box_width >= 0:
visited[wi - self._box_width] = True
vis = True
if vis:
return
# check east
ei = i + 1
if ei < self._size and ei % self._box_width != 0 and maze[ei] <= 0:
vis = False
if ei - self._box_width >= 0:
visited[ei - self._box_width] = True
vis = True
if wi + self._box_width >= 0:
visited[ei - self._box_width] = True
vis = True
if vis:
return
# check south
si = i - self._box_width
if si >= 0 and maze[si] <= 0:
vis = False
if si - 1 >= 0 and (si - 1) % self._box_width != 0:
visited[si - 1] = True
vis = True
if si + 1 < self._size and si % self._box_width != 0:
visited[si + 1] = True
vis = True
if vis:
return
# check north
ni = i + self._box_width
if ni < self._size and maze[ni] <= 0:
vis = False
if ni - 1 >= 0 and (ni - 1) % self._box_width != 0:
visited[ni - 1] = True
vis = True
if ni + 1 < self._size and ni % self._box_width != 0:
visited[ni + 1] = True
vis = True
if vis:
return
def generate_random_maze(self):
"""
Uses the principles of depth-first-search with randomized neighbour selection to generate an organic looking
maze. The dfs is implemented using a stack (collections deque).
:return: yields the wall to remove every time next() is called on this function.
"""
# Create a list containing the color code of the _maze tiles
maze = [box[2] if box[2] < 0 else 1 for box in self._maze]
# Create a list to remember which vertices (or tiles) have already been visited.
visited = [False for i in range(self._size)]
stack = deque()
sx = self._start_idx
stack.append(sx)
# Remember the direction we came from
prev_dir = 0
# counter variable to keep track of how many increments has been executed (reset on every yield)
increments = 1
while len(stack) != 0:
cur = stack.pop()
if not visited[cur]:
self._backtrack_visitors(cur, maze, visited)
# start and end tiles must not be yielded
if maze[cur] >= 0:
maze[cur] = 0
yield cur, increments
| |
main.log.warn( "Not all intents installed" )
if intentState:
break
else:
#Dumping intent summary
main.log.info( "Intents:\n" + str( main.ONOScli1.intents( jsonFormat=False, summary=True ) ) )
utilities.assert_equals( expect=main.TRUE, actual=intentState,
onpass="INTENTS INSTALLED",
onfail="SOME INTENTS NOT INSTALLED" )
main.step("Verify flows are all added")
for i in range( main.flowCheck ):
if i != 0:
main.log.warn( "verification failed. Retrying..." )
main.log.info( "Waiting for onos to add flows..." )
time.sleep( main.checkFlowsDelay )
flowState = main.TRUE
for cli in main.CLIs:
flowState = cli.checkFlowState()
if not flowState:
main.log.warn( "Not all flows added" )
if flowState:
break
else:
#Dumping summary
main.log.info( "Summary:\n" + str( main.ONOScli1.summary(jsonFormat=False) ) )
utilities.assert_equals( expect=main.TRUE, actual=flowState,
onpass="FLOWS INSTALLED",
onfail="SOME FLOWS NOT ADDED" )
main.step( "Verify Ping across all hosts" )
for i in range(main.numPings):
time1 = time.time()
pingResult = main.Mininet1.pingall(timeout=main.pingTimeout)
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time.sleep(main.pingSleep)
else: break
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for Ping All: " +
str( timeDiff ) +
" seconds" )
caseResult = ( pingResult and intentState and flowState)
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="Install 25 single to multi point Intents and Ping All test PASS",
onfail="Install 25 single to multi point Intents and Ping All test FAIL" )
if not intentState:
main.log.debug( "Intents failed to install completely" )
if not pingResult:
main.log.debug( "Pingall failed" )
if not checkFlowsState:
main.log.debug( "Flows failed to add completely" )
if not caseResult and main.failSwitch:
main.log.report("Stopping test")
main.stop( email=main.emailOnStop )
def CASE97( self ):
"""
Install single-multi point intents and verify Ping all works
for Chordal topology
"""
import copy
main.log.report( "Install single-multi point intents and verify Ping all" )
main.log.report( "___________________________________________" )
main.case( "Install single-multi point intents and Ping all" )
deviceDPIDsCopy = copy.copy(main.deviceDPIDs)
portEgressList = ['1']*(len(deviceDPIDsCopy) - 1)
intentIdList = []
main.log.info( "MACsDict" + str(main.MACsDict) )
time1 = time.time()
for i in xrange(0,len(deviceDPIDsCopy),int(main.numCtrls)):
pool = []
for cli in main.CLIs:
ingressDevice = deviceDPIDsCopy[i]
egressDeviceList = copy.copy(deviceDPIDsCopy)
egressDeviceList.remove(ingressDevice)
if i >= len( deviceDPIDsCopy ):
break
t = main.Thread( target=cli.addSinglepointToMultipointIntent,
threadID=main.threadID,
name="addSinglepointToMultipointIntent",
args =[ingressDevice,egressDeviceList,'1',portEgressList,'',main.MACsDict.get(ingressDevice),''])
pool.append(t)
t.start()
i = i + 1
main.threadID = main.threadID + 1
for thread in pool:
thread.join()
intentIdList.append(thread.result)
time2 = time.time()
main.log.info("Time for adding point intents: %2f seconds" %(time2-time1))
main.step("Verify intents are installed")
# Giving onos multiple chances to install intents
for i in range( main.intentCheck ):
if i != 0:
main.log.warn( "Verification failed. Retrying..." )
main.log.info("Waiting for onos to install intents...")
time.sleep( main.checkIntentsDelay )
intentState = main.TRUE
for e in range(int(main.numCtrls)):
main.log.info( "Checking intents on CLI %s" % (e+1) )
intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\
intentState
if not intentState:
main.log.warn( "Not all intents installed" )
if intentState:
break
else:
#Dumping intent summary
main.log.info( "Intents:\n" + str( main.ONOScli1.intents( jsonFormat=False, summary=True ) ) )
utilities.assert_equals( expect=main.TRUE, actual=intentState,
onpass="INTENTS INSTALLED",
onfail="SOME INTENTS NOT INSTALLED" )
main.step("Verify flows are all added")
for i in range( main.flowCheck ):
if i != 0:
main.log.warn( "verification failed. Retrying..." )
main.log.info( "Waiting for onos to add flows..." )
time.sleep( main.checkFlowsDelay )
flowState = main.TRUE
for cli in main.CLIs:
flowState = cli.checkFlowState()
if not flowState:
main.log.warn( "Not all flows added" )
if flowState:
break
else:
#Dumping summary
main.log.info( "Summary:\n" + str( main.ONOScli1.summary(jsonFormat=False) ) )
utilities.assert_equals( expect=main.TRUE, actual=flowState,
onpass="FLOWS INSTALLED",
onfail="SOME FLOWS NOT ADDED" )
main.step( "Verify Ping across all hosts" )
for i in range(main.numPings):
time1 = time.time()
pingResult = main.Mininet1.pingall(timeout=main.pingTimeout)
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time.sleep(main.pingSleep)
else: break
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for Ping All: " +
str( timeDiff ) +
" seconds" )
caseResult = ( pingResult and intentState and flowState)
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="Install 25 single to multi point Intents and Ping All test PASS",
onfail="Install 25 single to multi point Intents and Ping All test FAIL" )
if not intentState:
main.log.debug( "Intents failed to install completely" )
if not pingResult:
main.log.debug( "Pingall failed" )
if not checkFlowsState:
main.log.debug( "Flows failed to add completely" )
if not caseResult and main.failSwitch:
main.log.report("Stopping test")
main.stop( email=main.emailOnStop )
def CASE98( self ):
"""
Install single-multi point intents and verify Ping all works
for Spine topology
"""
import copy
main.log.report( "Install single-multi point intents and verify Ping all" )
main.log.report( "___________________________________________" )
main.case( "Install single-multi point intents and Ping all" )
deviceDPIDsCopy = copy.copy( main.deviceDPIDs )
deviceDPIDsCopy = deviceDPIDsCopy[ 10: ]
portEgressList = [ '1' ]*(len(deviceDPIDsCopy) - 1)
intentIdList = []
MACsDictCopy = {}
for i in range( len( deviceDPIDsCopy ) ):
MACsDictCopy[ deviceDPIDsCopy[ i ] ] = main.hostMACs[i].split( '/' )[ 0 ]
main.log.info( "deviceDPIDsCopy" + str(deviceDPIDsCopy) )
main.log.info( "MACsDictCopy" + str(MACsDictCopy) )
time1 = time.time()
for i in xrange(0,len(deviceDPIDsCopy),int(main.numCtrls)):
pool = []
for cli in main.CLIs:
if i >= len( deviceDPIDsCopy ):
break
ingressDevice = deviceDPIDsCopy[i]
egressDeviceList = copy.copy(deviceDPIDsCopy)
egressDeviceList.remove(ingressDevice)
t = main.Thread( target=cli.addSinglepointToMultipointIntent,
threadID=main.threadID,
name="addSinglepointToMultipointIntent",
args =[ingressDevice,egressDeviceList,'1',portEgressList,'',MACsDictCopy.get(ingressDevice),''])
pool.append(t)
t.start()
i = i + 1
main.threadID = main.threadID + 1
for thread in pool:
thread.join()
intentIdList.append(thread.result)
time2 = time.time()
main.log.info("Time for adding point intents: %2f seconds" %(time2-time1))
main.step("Verify intents are installed")
# Giving onos multiple chances to install intents
for i in range( main.intentCheck ):
if i != 0:
main.log.warn( "Verification failed. Retrying..." )
main.log.info("Waiting for onos to install intents...")
time.sleep( main.checkIntentsDelay )
intentState = main.TRUE
for e in range(int(main.numCtrls)):
main.log.info( "Checking intents on CLI %s" % (e+1) )
intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\
intentState
if not intentState:
main.log.warn( "Not all intents installed" )
if intentState:
break
else:
#Dumping intent summary
main.log.info( "Intents:\n" + str( main.ONOScli1.intents( jsonFormat=False, summary=True ) ) )
utilities.assert_equals( expect=main.TRUE, actual=intentState,
onpass="INTENTS INSTALLED",
onfail="SOME INTENTS NOT INSTALLED" )
main.step("Verify flows are all added")
for i in range( main.flowCheck ):
if i != 0:
main.log.warn( "verification failed. Retrying..." )
main.log.info( "Waiting for onos to add flows..." )
time.sleep( main.checkFlowsDelay )
flowState = main.TRUE
for cli in main.CLIs:
flowState = cli.checkFlowState()
if not flowState:
main.log.warn( "Not all flows added" )
if flowState:
break
else:
#Dumping summary
main.log.info( "Summary:\n" + str( main.ONOScli1.summary(jsonFormat=False) ) )
utilities.assert_equals( expect=main.TRUE, actual=flowState,
onpass="FLOWS INSTALLED",
onfail="SOME FLOWS NOT ADDED" )
main.step( "Verify Ping across all hosts" )
for i in range(main.numPings):
time1 = time.time()
pingResult = main.Mininet1.pingall(timeout=main.pingTimeout)
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time.sleep(main.pingSleep)
else: break
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for Ping All: " +
str( timeDiff ) +
" seconds" )
caseResult = ( pingResult and intentState and flowState)
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="Install 25 single to multi point Intents and Ping All test PASS",
onfail="Install 25 single to multi point Intents and Ping All test FAIL" )
if not intentState:
main.log.debug( "Intents failed to install completely" )
if not pingResult:
main.log.debug( "Pingall failed" )
if not checkFlowsState:
main.log.debug( "Flows failed to add completely" )
if not caseResult and main.failSwitch:
main.log.report("Stopping test")
main.stop( email=main.emailOnStop )
def CASE190( self ):
"""
Verify IPv6 ping across 600 Point intents (Att Topology)
"""
main.log.report( "Verify IPv6 ping across 600 Point intents (Att Topology)" )
main.log.report( "_________________________________________________" )
import itertools
import time
main.case( "IPv6 ping all 600 Point intents" )
main.step( "Verify IPv6 Ping across all hosts" )
pingResult = main.FALSE
time1 = time.time()
pingResult = main.Mininet1.pingall( protocol="IPv6", timeout=main.pingTimeout )
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time1 = time.time()
pingResult = main.Mininet1.pingall( protocol="IPv6", timeout=main.pingTimeout )
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for IPv6 Ping All: " +
str( timeDiff ) +
" seconds" )
utilities.assert_equals( expect=main.TRUE, actual=pingResult,
onpass="PING ALL PASS",
onfail="PING ALL FAIL" )
caseResult = pingResult
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="IPv6 Ping across 600 Point intents test PASS",
onfail="IPv6 Ping across 600 Point intents test FAIL" )
def CASE191( self ):
"""
Verify IPv6 ping across 600 Point intents (Chordal Topology)
"""
main.log.report( "Verify IPv6 ping across 600 Point intents (Chordal Topology)" )
main.log.report( "_________________________________________________" )
| |
Byte 4-5 | | TX = Stop X-Axis | Sets which Axis is to be stopped |
| | | TY = Stop Y-Axis | |
| | | TZ = Stop Z-Axis | |
| | | TE = Stop E-Axis | |
| | | TA = Stop All | |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 6 | \* | End of Command |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
|
| **Reply**
The PTHAT will send back a reply when it receives a command and also when it has completed a command.
In this case the completed command will be sent back when the Axis that has came to a stop.
If the Command sent ID number was set for bytes 2-3, then this will be returned in the Received reply, but the
completed command ID will be from the original ID used in the Start command.
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+-------------+-------------+
| | X Stop | | Y Stop | | Z Stop | | E Stop | | All Stop | | X Pulse | | Y Pulse | | Z Pulse | | E Pulse |
| | Received | | Received | | Received | | Received | | Received | | Stopped | | Stopped | | Stopped | | Stopped |
+==============+==============+==============+==============+===============+===============+===============+=============+=============+
| RI00TX* | RI00TY* | RI00TZ* | RI00TE* | RI00TA* | CI00TX* | CI00TY* | CI00TZ* | CI00TE* |
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+-------------+-------------+
"""
command = f"{self.command_type}{self.command_id:02}{self.__stop_axis_command}{self.axis}{self._command_end}"
self.__stop(command=command)
return command
def stop_all(self):
"""
Stop all of the pulse trains from running. This is a controlled stop, in that the Axis will ramp down
and not just stop to protect the motors. If you want to use a sudden stop then we recommend a external
Emergency Stop button that cuts the power or send a Reset command.
:returns: the command to send to the serial port
:rtype: str
**Command**
+----------+------------+------------+----------------------------------------------------------------------------------+
| Byte 1 | Byte 2-3 | Byte 4-5 | Byte 6 |
+==========+============+============+==================================================================================+
| I | 00 | TA | \* |
+----------+------------+------------+----------------------------------------------------------------------------------+
|
| **Command breakdown**
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte | Setting | Description |
+==============+==========================+==========================================================================================+
| Byte 1 | I=Instant Command | Sets command to either Instant or Buffer. |
| | B=Buffer Command | |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 2-3 | 0-99 | Optional Command ID |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 4-5 | | TX = Stop X-Axis | Sets which Axis is to be stopped |
| | | TY = Stop Y-Axis | |
| | | TZ = Stop Z-Axis | |
| | | TE = Stop E-Axis | |
| | | TA = Stop All | |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 6 | \* | End of Command |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
|
| **Reply**
The PTHAT will send back a reply when it receives a command and also when it has completed a command.
In this case the completed command will be sent back when the Axis that has came to a stop.
If the Command sent ID number was set for bytes 2-3, then this will be returned in the Received reply, but the
completed command ID will be from the original ID used in the Start command.
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+-------------+-------------+
| | X Stop | | Y Stop | | Z Stop | | E Stop | | All Stop | | X Pulse | | Y Pulse | | Z Pulse | | E Pulse |
| | Received | | Received | | Received | | Received | | Received | | Stopped | | Stopped | | Stopped | | Stopped |
+==============+==============+==============+==============+===============+===============+===============+=============+=============+
| RI00TX* | RI00TY* | RI00TZ* | RI00TE* | RI00TA* | CI00TX* | CI00TY* | CI00TZ* | CI00TE* |
+--------------+--------------+--------------+--------------+---------------+---------------+---------------+-------------+-------------+
"""
command = f"{self.command_type}{self.command_id:02}{self.__stop_all_axis_command}" \
f"{self._command_end}"
self.__stop(command=command)
return command
def __stop(self, command):
"""
This is a common method that is called by all the other stop methods.
:returns: Whether the method was successful or not. If true then it was successful.
:rtype: bool
"""
if not self._validate_command():
return False
if self.__started:
if self.debug:
print(f"Axis stop command: {command}")
if self.auto_send_command:
self.send_command(command=command)
self.__started = False
def pause(self, return_x_pulse_cnt=None, return_y_pulse_cnt=None, return_z_pulse_cnt=None, return_e_pulse_cnt=None):
"""
Pauses one of the pulse trains from running.
Bytes 6-9 choose to send Pulse count back after pause for each Axis.
:param return_x_pulse_cnt: X axis pulse count replies, disable = 0, enable = 1 - default 0 or
self.pause_all_return_x_pulse_count
:param return_y_pulse_cnt: Y axis pulse count replies, disable = 0, enable = 1 - default 0 or
self.pause_all_return_y_pulse_count
:param return_z_pulse_cnt: Z axis pulse count replies, disable = 0, enable = 1 - default 0 or
self.pause_all_return_z_pulse_count
:param return_e_pulse_cnt: E axis pulse count replies, disable = 0, enable = 1 - default 0 or
self.pause_all_return_e_pulse_count
:returns: the command to send to the serial port
:rtype: str
**Command**
+----------+------------+------------+-----------+-----------+-----------+-----------+-----------+----------------------------------+
| Byte 1 | Byte 2-3 | Byte 4-5 | Byte 6 | Byte 7 | Byte 8 | Byte 9 | Byte 10 | |
+==========+============+============+===========+===========+===========+===========+===========+==================================+
| I | 00 | PX | 1 | 0 | 0 | 0 | \* | |
+----------+------------+------------+-----------+-----------+-----------+-----------+-----------+----------------------------------+
|
| **Command breakdown**
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte | Setting | Description |
+==============+==========================+==========================================================================================+
| Byte 1 | I=Instant Command | Sets command to either Instant or Buffer. |
| | B=Buffer Command | |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 2-3 | 0-99 | Optional Command ID |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 4-5 | | PX = Pause X-Axis | Sets which Axis is to be Paused |
| | | PY = Pause Y-Axis | |
| | | PZ = Pause Z-Axis | |
| | | PE = Pause E-Axis | |
| | | PA = Pause All | |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 6 | 0-1 | | Sends back pulse count from Axis if set to 1 with: |
| | | | PA = X-Axis |
| | | | PX = X-Axis |
| | | | PY = Y-Axis |
| | | | PZ = Z-Axis |
| | | | PE = E-Axis |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 7 | 0-1 | | Sends back pulse count from Axis if set to 1 with: |
| | | | PA = Y-Axis |
| | | | Set to 0 for PX, PY, PZ, PE |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 8 | 0-1 | | Sends back pulse count from Axis if set to 1 with: |
| | | | PA = Z-Axis |
| | | | Set to 0 for PX, PY, PZ, PE |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 9 | 0-1 | | Sends back pulse count from Axis if set to 1 with: |
| | | | PA = E-Axis |
| | | | Set to 0 for PX, PY, PZ, PE |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
| Byte 10 | \* | End of Command |
+--------------+--------------------------+------------------------------------------------------------------------------------------+
|
| **Reply**
The PTHAT will send back a reply when it receives a command and also when it has completed a command.
If Pulse Count is selected then it will also send back the pulse count of chosen Axis.
In this case the completed command will be sent back when the Axis is resumed, after a Pause.
If the Command sent ID number was set for bytes 2-3, then this will be returned in the reply.
+------------------+------------------+------------------+------------------+------------------+---------------------------------------------+
| | X Pause | | Y Pause | | Z Pause | | E Pause | | Pause All | |
| | Received | | Received | | Received | | Received | | Received | |
+==================+==================+==================+==================+==================+=============================================+
| RI00PX* | RI00PY* | RI00PZ* | RI00PE* | RI00PA* | |
+------------------+------------------+------------------+------------------+------------------+---------------------------------------------+
| | DI00PX* | | DI00PY* | | DI00PZ* | | DI00PE* | | DI00PX to E* | |
| | XP(D)XResult* | | XP(D)XResult* | |
review request.
This shows the type of update that was made, the user who made the
update, and when the update was made. Clients can use this to inform
the user that the review request was updated, or automatically update
it in the background.
This does not take into account changes to a draft review request, as
that's generally not update information that the owner of the draft is
interested in. Only public updates are represented.
"""
try:
review_request = \
review_request_resource.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not review_request_resource.has_access_permissions(request,
review_request):
return _no_access_error(request.user)
timestamp, updated_object = review_request.get_last_activity()
timestamp = timestamp.isoformat()
if get_modified_since(request, timestamp):
return HttpResponseNotModified()
user = None
summary = None
update_type = None
if isinstance(updated_object, ReviewRequest):
user = updated_object.submitter
if updated_object.status == ReviewRequest.SUBMITTED:
summary = _("Review request submitted")
elif updated_object.status == ReviewRequest.DISCARDED:
summary = _("Review request discarded")
else:
summary = _("Review request updated")
update_type = "review-request"
elif isinstance(updated_object, DiffSet):
summary = _("Diff updated")
update_type = "diff"
elif isinstance(updated_object, Review):
user = updated_object.user
if updated_object.is_reply():
summary = _("New reply")
update_type = "reply"
else:
summary = _("New review")
update_type = "review"
else:
# Should never be able to happen. The object will always at least
# be a ReviewRequest.
assert False
return 200, {
self.item_result_key: {
'timestamp': timestamp,
'user': user,
'summary': summary,
'type': update_type,
}
}, {
'Last-Modified': http_date(timestamp)
}
# Module-level singleton instance of the resource, referenced by other
# resources' child lists (e.g. ReviewRequestResource.item_child_resources).
review_request_last_update_resource = ReviewRequestLastUpdateResource()
class ReviewRequestResource(WebAPIResource):
"""Provides information on review requests."""
model = ReviewRequest
name = 'review_request'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the review request.',
},
'submitter': {
'type': UserResource,
'description': 'The user who submitted the review request.',
},
'time_added': {
'type': str,
'description': 'The date and time that the review request was '
'added (in YYYY-MM-DD HH:MM:SS format).',
},
'last_updated': {
'type': str,
'description': 'The date and time that the review request was '
'last updated (in YYYY-MM-DD HH:MM:SS format).',
},
'status': {
'type': ('discarded', 'pending', 'submitted'),
'description': 'The current status of the review request.',
},
'public': {
'type': bool,
'description': 'Whether or not the review request is currently '
'visible to other users.',
},
'changenum': {
'type': int,
'description': 'The change number that the review request is '
'representing. These are server-side '
'repository-specific change numbers, and are not '
'supported by all types of repositories. This may '
'be ``null``.',
},
'repository': {
'type': RepositoryResource,
'description': "The repository that the review request's code "
"is stored on.",
},
'summary': {
'type': str,
'description': "The review request's brief summary.",
},
'description': {
'type': str,
'description': "The review request's description.",
},
'testing_done': {
'type': str,
'description': 'The information on the testing that was done '
'for the change.',
},
'bugs_closed': {
'type': [str],
'description': 'The list of bugs closed or referenced by this '
'change.',
},
'branch': {
'type': str,
'description': 'The branch that the code was changed on or that '
'the code will be committed to. This is a '
'free-form field that can store any text.',
},
'target_groups': {
'type': [ReviewGroupResource],
'description': 'The list of review groups who were requested '
'to review this change.',
},
'target_people': {
'type': [UserResource],
'description': 'The list of users who were requested to review '
'this change.',
},
}
uri_object_key = 'review_request_id'
model_object_key = 'display_id'
last_modified_field = 'last_updated'
item_child_resources = [
change_resource,
diffset_resource,
review_request_draft_resource,
review_request_last_update_resource,
review_resource,
screenshot_resource,
file_attachment_resource
]
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
_close_type_map = {
'submitted': ReviewRequest.SUBMITTED,
'discarded': ReviewRequest.DISCARDED,
}
def get_queryset(self, request, is_list=False, local_site_name=None,
                 *args, **kwargs):
    """Returns a queryset for ReviewRequest models.

    By default, this returns all published or formerly published
    review requests.

    If the queryset is being used for a list of review request
    resources, then it can be further filtered by one or more of the
    following arguments in the URL:

    * ``changenum``
        - The change number the review requests must be
          against. This will only return one review request
          per repository, and only works for repository
          types that support server-side changesets.

    * ``time-added-to``
        - The date/time that all review requests must be added before.
          This is compared against the review request's ``time_added``
          field. See below for information on date/time formats.

    * ``time-added-from``
        - The earliest date/time the review request could be added.
          This is compared against the review request's ``time_added``
          field. See below for information on date/time formats.

    * ``last-updated-to``
        - The date/time that all review requests must be last updated
          before. This is compared against the review request's
          ``last_updated`` field. See below for information on date/time
          formats.

    * ``last-updated-from``
        - The earliest date/time the review request could be last
          updated. This is compared against the review request's
          ``last_updated`` field. See below for information on date/time
          formats.

    * ``from-user``
        - The username that the review requests must be owned by.

    * ``repository``
        - The ID of the repository that the review requests must be on.

    * ``ship-it``
        - The review request must have at least one review with Ship It
          set, if this is 1. Otherwise, if 0, it must not have any marked
          Ship It.

    * ``status``
        - The status of the review requests. This can be ``pending``,
          ``submitted`` or ``discarded``.

    * ``to-groups``
        - A comma-separated list of review group names that the review
          requests must have in the reviewer list.

    * ``to-users-groups``
        - A comma-separated list of usernames who are in groups that the
          review requests must have in the reviewer list.

    * ``to-users``
        - A comma-separated list of usernames that the review requests
          must either have in the reviewer list specifically or by way
          of a group.

    * ``to-users-directly``
        - A comma-separated list of usernames that the review requests
          must have in the reviewer list specifically.

    Some arguments accept dates. The handling of dates is quite flexible,
    accepting a variety of date/time formats, but we recommend sticking
    with ISO8601 format.

    ISO8601 format defines a date as being in ``{yyyy}-{mm}-{dd}`` format,
    and a date/time as being in ``{yyyy}-{mm}-{dd}T{HH}:{MM}:{SS}``.
    A timezone can also be appended to this, using ``-{HH:MM}``.

    The following examples are valid dates and date/times:

    * ``2010-06-27``
    * ``2010-06-27T16:26:30``
    * ``2010-06-27T16:26:30-08:00``
    """
    local_site = _get_local_site(local_site_name)

    if is_list:
        # q accumulates AND-ed positive filters; exclude_q collects
        # conditions the results must NOT match (only used by ship-it=0).
        q = Q()
        exclude_q = Q()

        # Comma-separated reviewer filters: each name/username ANDs an
        # additional sub-query, so a request must match all of them.
        if 'to-groups' in request.GET:
            for group_name in request.GET.get('to-groups').split(','):
                q = q & self.model.objects.get_to_group_query(group_name,
                                                              None)

        if 'to-users' in request.GET:
            for username in request.GET.get('to-users').split(','):
                q = q & self.model.objects.get_to_user_query(username)

        if 'to-users-directly' in request.GET:
            for username in request.GET.get('to-users-directly').split(','):
                q = q & self.model.objects.get_to_user_directly_query(
                    username)

        if 'to-users-groups' in request.GET:
            for username in request.GET.get('to-users-groups').split(','):
                q = q & self.model.objects.get_to_user_groups_query(
                    username)

        if 'from-user' in request.GET:
            q = q & self.model.objects.get_from_user_query(
                request.GET.get('from-user'))

        # Numeric filters. NOTE(review): int() raises ValueError on
        # non-numeric input -- presumably handled by an outer layer.
        if 'repository' in request.GET:
            q = q & Q(repository=int(request.GET.get('repository')))

        if 'changenum' in request.GET:
            q = q & Q(changenum=int(request.GET.get('changenum')))

        if 'ship-it' in request.GET:
            ship_it = request.GET.get('ship-it')

            # Truthy values require a Ship It review; falsy values
            # exclude any request having one. Other values are ignored.
            if ship_it in ('1', 'true', 'True'):
                q = q & Q(reviews__ship_it=True)
            elif ship_it in ('0', 'false', 'False'):
                exclude_q = exclude_q & Q(reviews__ship_it=True)

        # Date-range filters. _parse_date returning a falsy value means
        # the input was unparsable and the filter is silently skipped.
        if 'time-added-from' in request.GET:
            date = self._parse_date(request.GET['time-added-from'])

            if date:
                q = q & Q(time_added__gte=date)

        if 'time-added-to' in request.GET:
            date = self._parse_date(request.GET['time-added-to'])

            if date:
                q = q & Q(time_added__lt=date)

        if 'last-updated-from' in request.GET:
            date = self._parse_date(request.GET['last-updated-from'])

            if date:
                q = q & Q(last_updated__gte=date)

        if 'last-updated-to' in request.GET:
            date = self._parse_date(request.GET['last-updated-to'])

            if date:
                q = q & Q(last_updated__lt=date)

        # Default to pending review requests when no status is given.
        status = string_to_status(request.GET.get('status', 'pending'))

        queryset = self.model.objects.public(user=request.user,
                                             status=status,
                                             local_site=local_site,
                                             extra_query=q)

        if exclude_q:
            queryset = queryset.exclude(exclude_q)

        return queryset
    else:
        # Item lookups: no GET filtering, just scope to the local site.
        return self.model.objects.filter(local_site=local_site)
def has_access_permissions(self, request, review_request, *args, **kwargs):
    """Return whether the requesting user may view this review request."""
    viewer = request.user
    return review_request.is_accessible_by(viewer)
def has_modify_permissions(self, request, review_request, *args, **kwargs):
    """Return whether the requesting user may modify this review request."""
    editor = request.user
    return review_request.is_mutable_by(editor)
def has_delete_permissions(self, request, review_request, *args, **kwargs):
    """Return whether the requesting user may delete review requests.

    Deletion is gated on the Django model permission, not on ownership
    of the particular review request.
    """
    requester = request.user
    return requester.has_perm('reviews.delete_reviewrequest')
def serialize_bugs_closed_field(self, obj):
    """Serialize ``bugs_closed`` as the object's parsed bug list."""
    bug_list = obj.get_bug_list()
    return bug_list
def serialize_status_field(self, obj):
    """Serialize ``status`` as its public string form."""
    raw_status = obj.status
    return status_to_string(raw_status)
def serialize_id_field(self, obj):
    """Serialize ``id`` using the per-local-site display ID."""
    public_id = obj.display_id
    return public_id
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(NOT_LOGGED_IN, PERMISSION_DENIED, INVALID_USER,
INVALID_REPOSITORY, CHANGE_NUMBER_IN_USE,
INVALID_CHANGE_NUMBER, EMPTY_CHANGESET,
REPO_AUTHENTICATION_ERROR, REPO_INFO_ERROR,
MISSING_REPOSITORY)
@webapi_request_fields(
required={
'repository': {
'type': str,
'description': 'The path or ID of the repository that the '
'review request is for.',
},
},
optional={
'changenum': {
'type': int,
'description': 'The optional changenumber to look up for the '
'review request details. This only works | |
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9aHBo9hTpoMANAWAR5sSvwU2cGKUszBEni",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9quP3RgwEnBTkAWnoWMNhUPniAEmDvcCtS",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9h5EYaAGgerMEa3WAUq7uTugbfW6vzyB2N",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9q7RmYbYqvskEbuCT4UfHgiWoa3SaegAnm",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>NEG<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9igHa8yheahnJ9DcuFaNieoeyra52JxgrY",
"9hPEnL8Kjq28METub8EVe2Ei3BimZFaKfL",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9UNzGeDe4gwmEnn7Qu2Wo5iDppbRKtiMhE",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9dTyrRc1oCnTm517kpLAABAsAipcrAAWNV",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9pntAWHW7hMr827RnBFYt6P5a3wLuPWASp",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9fAxce7A89SYB7eVhr5sWsnT15Za5Kwao5",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9pEa8SWp4TKcBdYSrobyGgT457hHEXhsYy",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9TeD3gouUkof3Epatu6bVNUnnrgTjd45Kw",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9h2KodU1p4z1XybLCE7HERYEToKe1wpM2j",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9kAhjeiYuGVTVRJgW<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9odZ9MNSeRe3ShbP3o7whKa1u5W2qUphPr",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9i4bDVndDMmS9susCBZmUFj7irEDoMtcr3",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9jkuSpLiw1FAvRcf8nAemic9oYNthhs587",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9V6nXCLDR2FxowY4MeepgtdvatoEDHebyG",
"<KEY>",
"9ktFhDrpAabs4KpEqvuT5Ecy5SA6mhNiS7",
"9r9yUw5uzkwYDpBdeJfas2J2WGcfHW3ZU3",
"<KEY>",
"<KEY>",
"<KEY>",
"9oaMTDnveCzQNTYYod2BevUXTpsDbyGRT4",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9cQaQ4cin6Y4aS5K6dgmWNFYuRKEawhmEE",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9n6tJeP4tm78kLp5ErRt9KeRWGraawfarD",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9r8AfEZyeF6rYi1dpyQYNorG7FWig1R7xe",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9VYwNiuPaWzMa5n6Y21itoR8RLU295NYQW",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9j4RrosUzwHotfsTA1Lm4idXMc1TofsfFa",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9ZDy6SwapAEhbivXiFRKAD9FrnDAvodBQ8",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9oFc5mBTAK4nwPRgNKnPuTUHrAZrLREpTD",
"<KEY>", | |
import odbpy as odb
from enum import Enum
import random
import math
import copy
def ErrorQuit(msg):
    """Print an error message and abort the program with exit status 1.

    Raises SystemExit directly instead of calling the site-injected
    exit() builtin, which is not guaranteed to exist (e.g. under
    ``python -S`` or in embedded interpreters).
    """
    print("ERROR: %s" %(msg))
    raise SystemExit(1)
# bookshelf object type
#
# Declared via the Enum functional API; start=0 keeps the original
# zero-based member values.
BsType = Enum(
    "BsType",
    ["MOVABLE_STD_INST", "MOVABLE_MACRO_INST", "FIXED_INST", "PRIMARY"],
    start=0,
)
class Instance:
    """One bookshelf node, plus its eventual mapping onto an OpenDB inst.

    Geometry (lx, ly, width, height) comes from *.nodes / *.pl; pin and
    obstruction data are filled in later while parsing *.nets / *.shapes,
    and the odb* attributes are populated once the OpenDB design exists.
    """

    def __init__(self, name, lx, ly, width, height, isFixed):
        self.name = name
        # lower-left placement coordinate
        self.lx = lx
        self.ly = ly
        self.width = width
        self.height = height
        self.isFixed = isFixed
        # pin-connection counters, incremented while traversing *.nets
        self.numOutputs = 0
        self.numInputs = 0
        self.isFeasible = True
        # only used for FIXED insts to generate fake macro lef.
        # have (offsetX,offsetY,direction) pairs on each pin
        self.pins = []
        # saves dbITerm objects
        self.odbIPins = []
        self.odbOPins = []
        # cursors into odbIPins/odbOPins consumed by RetrieveOdb*Pin
        self.odbIPinIdx = 0
        self.odbOPinIdx = 0
        # only used for FIXED insts to generate fake macro lef.
        # have (lx, ly, ux, uy) pairs on each obs.
        self.obses = []
        # saves odbInst pointer
        self.odbInst = None
        # odbMaster will be used only for "FixedInsts"
        self.odbMaster = None
        # odbClkPin will be used to create clk net
        self.odbClkPin = None

    def SetLxLy(self, lx, ly):
        """Move the instance's lower-left corner to (lx, ly)."""
        self.lx = lx
        self.ly = ly

    def GetUxUy(self):
        """Return the upper-right corner as an (ux, uy) tuple."""
        return self.lx + self.width, self.ly + self.height

    # for nets traversing
    def IncrOutPin(self):
        """Count one more output-pin connection seen in *.nets."""
        self.numOutputs += 1

    def IncrInPin(self):
        """Count one more input-pin connection seen in *.nets."""
        self.numInputs += 1

    def NumInputs(self):
        return self.numInputs

    def NumOutputs(self):
        return self.numOutputs

    def HasObs(self):
        """Return True when at least one obstruction rect was added."""
        return len(self.obses) != 0

    # takes x, y from *.shapes -- coordinates (placedX, placedY) will be given
    def AddObs(self, shapeX, shapeY, width, height):
        """Record an obstruction rect, translated to instance-local coords."""
        placeX = shapeX - self.lx
        placeY = shapeY - self.ly
        self.obses.append([placeX, placeY, placeX + width, placeY + height])

    # takes x, y from *.nets -- all offsets are based on cell center!
    def AddPin(self, netX, netY,direction):
        """Record a pin offset, shifted from cell center to lower-left."""
        self.pins.append([netX + self.width/2.0, netY + self.height/2.0, direction])

    # add Odb ITerm input pins
    def AddOdbIPin(self, odbIPin):
        self.odbIPins.append(odbIPin)

    # add Odb ITerm ouput pins
    def AddOdbOPin(self, odbOPin):
        self.odbOPins.append(odbOPin)

    # set Odb ITerm clk pins for FF
    def SetClkPin(self, odbClkPin):
        self.odbClkPin = odbClkPin

    # hashmap should be based on oPins/iPins
    def GetHashName(self):
        """Return a bucketing key combining input-pin count and width."""
        # return 10000 * self.width + 100* self.numOutputs + self.numInputs
        return 10000 * self.numInputs + self.width

    def IsFeasible(self):
        """Return True for cells usable in the flow (>=1 in, exactly 1 out)."""
        # there are weird cells in bookshelf
        # 1. numInputs = 0
        # 2. numOutputs = 0
        # 3. numOutputs = 2
        # following will ignore these cases
        # note that cases 1 and 2 will cause
        # "huge insts removal" at commercial pnr flow
        return self.isFeasible and self.numInputs != 0 and self.numOutputs == 1

    def SetIsFeasible(self, val):
        self.isFeasible = val

    # Previous A2A author set FF on the following types:
    def IsFF(self, minFFWidth = 20):
        """Heuristic: a 1-in/1-out cell at least minFFWidth wide is a FF."""
        return self.numInputs == 1 and self.numOutputs == 1 and self.width >= minFFWidth

    # update odbInst pointer
    def SetOdbInst(self, odbInst, ffClkPinList = None):
        """Attach the created odb inst and classify its ITerms.

        When ffClkPinList is given, clock pins are matched by MTerm name;
        otherwise the ITerm's CLOCK signal type is used. SIGNAL ITerms are
        split into the input/output pin lists by IO type.
        """
        self.odbInst = odbInst
        dbITerms = self.odbInst.getITerms()
        for dbITerm in dbITerms:
            if ffClkPinList != None:
                if dbITerm.getMTerm().getName() in ffClkPinList:
                    self.SetClkPin(dbITerm)
            else:
                if dbITerm.getSigType() == "CLOCK":
                    self.SetClkPin(dbITerm)
            if dbITerm.getSigType() == "SIGNAL":
                if dbITerm.getIoType() == "OUTPUT":
                    self.AddOdbOPin(dbITerm)
                elif dbITerm.getIoType() == "INPUT":
                    self.AddOdbIPin(dbITerm)

    #update odbMaster pointer for fixed insts
    def SetOdbMaster(self, odbMaster):
        self.odbMaster = odbMaster

    # used when mapping
    def RetrieveOdbInPin(self):
        """Hand out the next unused odb input ITerm.

        Prints a diagnostic when the pins are exhausted; the subsequent
        index then raises IndexError.
        """
        if len(self.odbIPins) <= self.odbIPinIdx:
            print("WRONG(In):", self.name, self.odbIPins, self.odbIPinIdx)
        retPin = self.odbIPins[self.odbIPinIdx]
        self.odbIPinIdx += 1
        return retPin

    def RetrieveOdbOutPin(self):
        """Hand out the next unused odb output ITerm (see RetrieveOdbInPin)."""
        if self.odbOPinIdx >= len(self.odbOPins):
            print("WRONG(Out):", self.name, self.odbOPinIdx, self.odbOPins)
        retPin = self.odbOPins[self.odbOPinIdx]
        self.odbOPinIdx += 1
        return retPin
class Primary:
    """A bookshelf primary I/O (terminal_NI node) and its OpenDB BTerm."""

    def __init__(self, name, x, y, pinDir):
        self.name = name
        self.x = x
        self.y = y
        self.pinDir = pinDir
        # filled in once the corresponding OpenDB BTerm is created
        self.odbBTerm = None

    def SetDir(self, pinDir):
        """Overwrite the pin direction."""
        self.pinDir = pinDir

    def SetXY(self, x, y):
        """Overwrite the pin location."""
        self.x, self.y = x, y

    def SetOdbBTerm(self, odbBTerm):
        """Attach the OpenDB BTerm created for this primary."""
        self.odbBTerm = odbBTerm
class BookshelfToOdb:
def __init__(self, opendbpy, opendb, auxName, siteName,
             macroInstPinLayer = None,
             macroInstObsLayer = None,
             primaryLayer = None,
             mastersFileName = None,
             ffClkPinList = None,
             clkNetName = "clk",
             # minFFWidth = 20,
             targetFFRatio = 0.12,
             customFPRatio = 1.0):
    """Drive the whole bookshelf-to-OpenDB conversion.

    Parses the bookshelf files referenced by auxName, initializes the
    OpenDB database, maps bookshelf nodes onto masters and finally
    writes everything into OpenDB. All work happens here, in order.

    Args:
        opendbpy/opendb: odbpy module handle and the opened database.
        auxName: *.aux manifest; the design name is its stem.
        siteName: placement site name used when creating rows.
        macroInstPinLayer/macroInstObsLayer/primaryLayer: layer names
            used when generating the fake macro LEF and primary pins.
        mastersFileName: optional list restricting usable masters.
        ffClkPinList: optional clock-pin names for FF detection.
        clkNetName: name of the clock net to create.
        targetFFRatio: desired fraction of FF instances.
        customFPRatio: floorplan scaling factor.
    """
    self.odbpy = opendbpy
    self.odb = opendb
    self.auxName = auxName
    # design name = aux file name up to the first dot
    self.designName = auxName.split(".")[0]
    self.siteName = siteName
    self.ffClkPinList = ffClkPinList
    self.clkNetName = clkNetName
    # self.minFFWidth = minFFWidth
    self.targetFFRatio = targetFFRatio
    self.customFPRatio = customFPRatio
    # *.shape determine
    self.bsShapeList = []
    # 1. parse bookshelf
    self.ParseAux()
    # 2. init the odb object.
    self.InitOdb(
        macroInstPinLayer,
        macroInstObsLayer,
        primaryLayer)
    self.PostProcessInBookshelf()
    # 3. parse odb object
    self.ParseMasterUseList(mastersFileName)
    # 4. map bookshelf and odb
    self.ClassifyInsts()
    # 5. finally Fill in OpenDB
    self.FillOdb()
def ParseAux(self):
    """Read the *.aux manifest and dispatch to the per-file parsers.

    *.nodes/*.nets/*.scl/*.pl are mandatory; *.shapes and *.route are
    optional and *.wts is ignored with a warning.
    """
    with open(self.auxName, 'r') as f:
        cont = f.read()
    print("Parsing %s ..." % (self.auxName) )
    # Discovered file names, keyed by suffix; insertion order mirrors
    # the original elif chain.
    found = {key: "" for key in ("nodes", "nets", "shapes",
                                 "scl", "pl", "route")}
    for curLine in cont.split("\n"):
        # the first two tokens ("RowBasedPlacement", ":") are not files
        for token in curLine.strip().split()[2:]:
            for suffix in found:
                if token.endswith(suffix):
                    found[suffix] = token
                    break
            else:
                if token.endswith("wts"):
                    print("[WARNING] *.wts will be ignored")
    if found["nodes"] == "":
        ErrorQuit("*.nodes is missing")
    if found["nets"] == "":
        ErrorQuit("*.nets is missing")
    if found["scl"] == "":
        ErrorQuit("*.scl is missing")
    if found["pl"] == "":
        ErrorQuit("*.pl is missing")
    self.ParseNodes(found["nodes"])
    self.ParsePl(found["pl"])
    self.ParseNets(found["nets"])
    self.ParseScl(found["scl"])
    # The *.shape is optional!
    if found["shapes"] != "":
        self.ParseShapes(found["shapes"])
    if found["route"] != "":
        self.ParseRoutes(found["route"])
    print("Bookshelf Parsing Done")
def ParseNodes(self, nodeName):
    """Parse *.nodes into self.bsInstList and report node counts.

    Each entry of bsInstList is the whitespace-split token list of one
    node line: [name, width, height] plus an optional trailing
    "terminal" / "terminal_NI" marker.
    """
    print("Parsing %s ..." % (nodeName) )
    f = open(nodeName, 'r')
    cont = f.read()
    f.close()
    self.bsInstList = []
    for curLine in cont.split("\n"):
        curLine = curLine.strip()
        # skip the bookshelf header line, comments and blank lines
        if curLine.startswith("UCLA") or curLine.startswith("#") \
                or curLine == "":
            continue
        if curLine.startswith("NumNodes"):
            # NOTE(review): numNodes/numTerminals stay unbound (NameError
            # below) if the header lines are missing -- input is assumed
            # well-formed.
            numNodes = int(curLine.split(" ")[-1])
        elif curLine.startswith("NumTerminals"):
            numTerminals = int(curLine.split(" ")[-1])
        else:
            self.bsInstList.append(curLine.split())
    print("From Nodes: NumTotalNodes: %d" % (numNodes))
    print("From Nodes: NumTerminals: %d" % (numTerminals))
    # re-count by category to cross-check the declared totals
    numInsts = 0
    numFixedInsts = 0
    numPrimary = 0
    for curInst in self.bsInstList:
        if len(curInst) == 3:
            numInsts += 1
        elif curInst[-1] == "terminal":
            numFixedInsts += 1
        elif curInst[-1] == "terminal_NI":
            numPrimary += 1
    print("Parsed Nodes: NumInsts: %d" % (numInsts))
    print("Parsed Nodes: NumFixedInsts: %d" % (numFixedInsts))
    print("Parsed Nodes: NumPrimary: %d" % (numPrimary))
    print("")
def ParseNets(self, netName):
    """Parse *.nets into self.bsNetList.

    bsNetList entries are [netName, pinArr] where pinArr holds the
    token lists of the net's pin lines following its NetDegree header.
    """
    print("Parsing %s ..." % (netName) )
    f = open(netName, 'r')
    cont = f.read()
    f.close()
    tmpNetlist = []
    for curLine in cont.split("\n"):
        curLine = curLine.strip()
        # skip the bookshelf header line, comments and blank lines
        if curLine.startswith("UCLA") or curLine.startswith("#") \
                or curLine == "":
            continue
        if curLine.startswith("NumNets"):
            numNets = int(curLine.split(" ")[-1])
        elif curLine.startswith("NumPins"):
            numPins = int(curLine.split(" ")[-1])
        else:
            tmpNetlist.append(curLine.split())
    print("From Nets: NumNets: %d" % (numNets))
    print("From Nets: NumPins: %d" % (numPins))
    self.bsNetList = []
    for idx, curArr in enumerate(tmpNetlist):
        # 'NetDegree : <numPins> <netName>' starts a new net
        if curArr[0] == "NetDegree":
            numPinsInNet = int(curArr[2])
            # NOTE(review): this rebinds the netName parameter; harmless
            # here since the original argument is no longer needed.
            netName = curArr[3]
            pinArr = [l for l in tmpNetlist[(idx+1):(idx+numPinsInNet+1)]]
            self.bsNetList.append([netName, pinArr])
    print("Parsed Nets: NumNets: %d" %(len(self.bsNetList)))
    print("")
def ParseShapes(self, shapeName):
    """Parse *.shapes into self.bsShapeList.

    bsShapeList entries are [instName, shapeArr] where shapeArr holds
    the token lists of the rectangles making up a non-rectangular node.
    """
    print("Parsing %s ..." % (shapeName) )
    f = open(shapeName, 'r')
    cont = f.read()
    f.close()
    tmpShapeList = []
    for curLine in cont.split("\n"):
        curLine = curLine.strip()
        # skip the bookshelf header line, comments and blank lines
        if curLine.startswith("UCLA") or curLine.startswith("#") \
                or curLine == "":
            continue
        if curLine.startswith("NumNonRectangularNodes"):
            numNonRectNodes = int(curLine.split(" ")[-1])
        else:
            tmpShapeList.append(curLine.split())
    print("From Shapes: NumRectLinearInsts: %d" % (numNonRectNodes))
    self.bsShapeList = []
    for idx, curArr in enumerate(tmpShapeList):
        # find 'InstName : numShapes' string
        if curArr[1] == ":":
            instName = curArr[0]
            numShapesInInst = int(curArr[2])
            shapeArr = tmpShapeList[(idx+1):(idx+numShapesInInst+1)]
            self.bsShapeList.append([instName,shapeArr])
    print("Parsed Shapes: NumRectLinearInsts: %d" %(len(self.bsShapeList)))
    print("")
def ParseScl(self, sclName):
    """Parse *.scl into self.bsRowList.

    Each bsRowList entry is the slice of token lists between a CoreRow
    line and its matching End line (exclusive of the End line itself).
    """
    print("Parsing %s ..." % (sclName) )
    f = open(sclName, 'r')
    cont = f.read()
    f.close()
    tmpRowList = []
    for curLine in cont.split("\n"):
        curLine = curLine.strip()
        # skip the bookshelf header line, comments and blank lines
        if curLine.startswith("UCLA") or curLine.startswith("#") \
                or curLine == "":
            continue
        # both NumRows and Numrows spellings occur in the wild
        if curLine.startswith("NumRows") or curLine.startswith("Numrows"):
            numRows = int(curLine.split(" ")[-1])
        else:
            tmpRowList.append(curLine.split())
    print("From scl: NumRows: %d" % (numRows))
    # extract indices on CoreRow/End
    coreRowIdxList = [idx for idx,tmpArr in enumerate(tmpRowList) if tmpArr[0] == "CoreRow"]
    endIdxList = [idx for idx,tmpArr in enumerate(tmpRowList) if tmpArr[0] == "End"]
    # pairing below assumes CoreRow/End alternate strictly
    if len(coreRowIdxList) != len(endIdxList):
        ErrorQuit("The number of CoreRow and End is different in scl!")
    self.bsRowList = []
    for idx1, idx2 in zip(coreRowIdxList, endIdxList):
        self.bsRowList.append(tmpRowList[idx1:idx2])
    print("Parsed scl: NumRows: %d" %(len(self.bsRowList)))
    print("")
def ParsePl(self, plName):
print("Parsing %s ..." % (plName) )
f = open(plName, 'r')
cont = f.read()
f.close()
self.bsPlList = []
for curLine in cont.split("\n"):
curLine = curLine.strip()
if curLine.startswith("UCLA") | |
volume data as training data (Input)
training_data = price_volume_df.iloc[:, 1:3].values
# Normalize the data
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_data)
# Create the training and testing data, training data contains present day and previous day values
X = []
y = []
for i in range(1, len(price_volume_df)):
X.append(training_set_scaled [i-1:i, 0])
y.append(training_set_scaled [i, 0])
# Convert the data into array format
X = np.asarray(X)
y = np.asarray(y)
# Split the data for training, the rest for testing.
split = int(0.7 * len(X))
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
# Reshape the 1D arrays to 3D arrays to feed in the model
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
X_train.shape, X_test.shape
# Create the model
inputs = keras.layers.Input(shape=(X_train.shape[1], X_train.shape[2]))
x = keras.layers.LSTM(150, return_sequences= True)(inputs)
x = keras.layers.Dropout(0.3)(x)
x = keras.layers.LSTM(150, return_sequences=True)(x)
x = keras.layers.Dropout(0.3)(x)
x = keras.layers.LSTM(150)(x)
outputs = keras.layers.Dense(1, activation='linear')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss="mse")
model.summary()
# Train the model
history = model.fit(
X_train, y_train,
epochs = 20,
batch_size = 32,
validation_split = 0.2)
# Make prediction
predicted = model.predict(X)
# Append the predicted values to the list
test_predicted = []
for i in predicted:
test_predicted.append(i[0])
# We take the median loss to evaluate the model
media = sum(history.history["loss"]) / len(history.history["loss"])
#############################################################################################################################
# Step 5
# Build a table with the projections; the first row is skipped because
# predictions only start at day 2 (each needs a previous-day input).
df_predicted = price_volume_df[1:][['Date']]
# Attach the model's predictions as a new column.
df_predicted['predictions'] = test_predicted
# Collect the scaled closing values for plotting.
close = []
for i in training_set_scaled:
    close.append(i[0])
# Final table holds (Date, prediction, Close); close[1:] aligns the actual
# values with the one-step-ahead predictions.
df_predicted['Close'] = close[1:]
# Plot the data
#interactive_plot(df_predicted, "Original Vs Prediction")
st.write(f"Evaluation loss {round(media*100, 6)}%")
# Render actual vs predicted series as a Streamlit line chart.
graphic_data = {}
graphic_data["Actual"]=df_predicted["Close"]
graphic_data["Prediction"] = df_predicted["predictions"]
st.line_chart(graphic_data)
st.subheader("Model Summary")
# Hard-coded snapshot of a previous training run's summary and epoch log,
# displayed as static text (NOT regenerated from the model trained above).
st.text("""
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 1, 1)] 0
lstm (LSTM) (None, 1, 150) 91200
dropout (Dropout) (None, 1, 150) 0
lstm_1 (LSTM) (None, 1, 150) 180600
dropout_1 (Dropout) (None, 1, 150) 0
lstm_2 (LSTM) (None, 150) 180600
dense (Dense) (None, 1) 151
=================================================================
Total params: 452,551
Trainable params: 452,551
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
1/14 [=>............................] - ETA: 56s - loss: 0.0290
7/14 [==============>...............] - ETA: 0s - loss: 0.0508
14/14 [==============================] - ETA: 0s - loss: 0.0423
14/14 [==============================] - 6s 92ms/step - loss: 0.0423 - val_loss: 0.3835
Epoch 2/20
1/14 [=>............................] - ETA: 0s - loss: 0.0472
8/14 [================>.............] - ETA: 0s - loss: 0.0248
13/14 [==========================>...] - ETA: 0s - loss: 0.0237
14/14 [==============================] - 0s 11ms/step - loss: 0.0241 - val_loss: 0.1809
Epoch 3/20
1/14 [=>............................] - ETA: 0s - loss: 0.0254
7/14 [==============>...............] - ETA: 0s - loss: 0.0204
13/14 [==========================>...] - ETA: 0s - loss: 0.0195
14/14 [==============================] - 0s 10ms/step - loss: 0.0196 - val_loss: 0.1402
Epoch 4/20
1/14 [=>............................] - ETA: 0s - loss: 0.0219
7/14 [==============>...............] - ETA: 0s - loss: 0.0119
14/14 [==============================] - 0s 10ms/step - loss: 0.0100 - val_loss: 0.0500
Epoch 5/20
1/14 [=>............................] - ETA: 0s - loss: 0.0049
7/14 [==============>...............] - ETA: 0s - loss: 0.0030
14/14 [==============================] - 0s 12ms/step - loss: 0.0022 - val_loss: 0.0062
Epoch 6/20
1/14 [=>............................] - ETA: 0s - loss: 1.8830e-04
8/14 [================>.............] - ETA: 0s - loss: 5.7900e-04
14/14 [==============================] - 0s 10ms/step - loss: 6.1269e-04 - val_loss: 0.0046
Epoch 7/20
1/14 [=>............................] - ETA: 0s - loss: 3.8925e-04
6/14 [===========>..................] - ETA: 0s - loss: 6.3538e-04
13/14 [==========================>...] - ETA: 0s - loss: 5.2828e-04
14/14 [==============================] - 0s 11ms/step - loss: 5.2585e-04 - val_loss: 0.0015
Epoch 8/20
1/14 [=>............................] - ETA: 0s - loss: 2.9108e-04
9/14 [==================>...........] - ETA: 0s - loss: 4.7043e-04
14/14 [==============================] - 0s 10ms/step - loss: 4.1497e-04 - val_loss: 0.0029
Epoch 9/20
1/14 [=>............................] - ETA: 0s - loss: 7.6592e-04
8/14 [================>.............] - ETA: 0s - loss: 4.3651e-04
14/14 [==============================] - 0s 10ms/step - loss: 5.1143e-04 - val_loss: 0.0046
Epoch 10/20
1/14 [=>............................] - ETA: 0s - loss: 4.3880e-04
8/14 [================>.............] - ETA: 0s - loss: 6.3981e-04
14/14 [==============================] - 0s 9ms/step - loss: 5.8694e-04 - val_loss: 0.0021
Epoch 11/20
1/14 [=>............................] - ETA: 0s - loss: 1.9836e-04
9/14 [==================>...........] - ETA: 0s - loss: 7.9962e-04
14/14 [==============================] - 0s 9ms/step - loss: 6.7786e-04 - val_loss: 0.0023
Epoch 12/20
1/14 [=>............................] - ETA: 0s - loss: 5.7881e-04
8/14 [================>.............] - ETA: 0s - loss: 5.3330e-04
14/14 [==============================] - 0s 11ms/step - loss: 5.5330e-04 - val_loss: 0.0029
Epoch 13/20
1/14 [=>............................] - ETA: 0s - loss: 3.2055e-04
8/14 [================>.............] - ETA: 0s - loss: 4.2544e-04
14/14 [==============================] - 0s 10ms/step - loss: 4.5558e-04 - val_loss: 0.0045
Epoch 14/20
1/14 [=>............................] - ETA: 0s - loss: 3.6282e-04
10/14 [====================>.........] - ETA: 0s - loss: 7.6032e-04
14/14 [==============================] - 0s 9ms/step - loss: 6.7844e-04 - val_loss: 0.0021
Epoch 15/20
1/14 [=>............................] - ETA: 0s - loss: 5.4045e-04
8/14 [================>.............] - ETA: 0s - loss: 7.3637e-04
14/14 [==============================] - 0s 10ms/step - loss: 6.6167e-04 - val_loss: 0.0014
Epoch 16/20
1/14 [=>............................] - ETA: 0s - loss: 0.0010
7/14 [==============>...............] - ETA: 0s - loss: 5.2453e-04
13/14 [==========================>...] - ETA: 0s - loss: 4.9783e-04
14/14 [==============================] - 0s 12ms/step - loss: 4.9552e-04 - val_loss: 0.0019
Epoch 17/20
1/14 [=>............................] - ETA: 0s - loss: 5.9480e-04
7/14 [==============>...............] - ETA: 0s - loss: 6.7032e-04
13/14 [==========================>...] - ETA: 0s - loss: 5.8481e-04
14/14 [==============================] - 0s 11ms/step - loss: 5.8287e-04 - val_loss: 0.0036
Epoch 18/20
1/14 [=>............................] - ETA: 0s - loss: 8.9031e-04
8/14 [================>.............] - ETA: 0s - loss: 4.5292e-04
14/14 [==============================] - 0s 11ms/step - loss: 4.2416e-04 - val_loss: 0.0018
Epoch 19/20
1/14 [=>............................] - ETA: 0s - loss: 1.5390e-04
10/14 [====================>.........] - ETA: 0s - loss: 4.6502e-04
14/14 [==============================] - 0s 10ms/step - loss: 5.2157e-04 - val_loss: 0.0034
Epoch 20/20
1/14 [=>............................] - ETA: 0s - loss: 7.4292e-04
8/14 [================>.............] - ETA: 0s - loss: 5.8960e-04
14/14 [==============================] - 0s 10ms/step - loss: 5.7970e-04 - val_loss: 0.0024""")
# Logistic Regresion
if option == 'Logistic Regression':
top_cryptos_df.Logo = path_to_image_html(top_cryptos_df.Logo)
st.write(top_cryptos_df.to_html(escape=False), unsafe_allow_html=True)
st.text("")
st.subheader("Performance Forecasting - Logistic Regression Model")
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
coin_list['Ticker'] = coin_choice
# Find 'Act Returns' and assign to new column.
coin_list_df = coin_list
coin_list_df['Actual Returns'] = coin_list_df['Adj Close'].pct_change()
# Set 'Short' and 'Long' window
short_window = 4
long_window = 50
# Find the rolling average and assign to new columns labeled SMA_Fast, SMA_Slow.
coin_list_df['SMA_Slow'] = coin_list_df['Adj Close'].rolling(window=short_window).mean()
coin_list_df['SMA_Fast'] = coin_list_df['Adj Close'].rolling(window=long_window).mean()
# Assign SMA columns to X
X = coin_list_df[['SMA_Fast', 'SMA_Slow']].shift().dropna().copy()
# Create a new column named 'Signal' as a place holder.
coin_list_df['Signal'] = 0.0
# Set signals based on actual returns being greater or less than 0.
coin_list_df.loc[(coin_list_df['Actual Returns'] >= 0), 'Signal'] = 1
coin_list_df.loc[(coin_list_df['Actual Returns'] < 0), 'Signal'] = -1
# Set y dataset to 'Signal' column and drop nan values.
y = coin_list_df['Signal'].dropna()
y = y.iloc[50:]
coin_list_df.dropna()
# Import train_test_split function to separate data into X and y datasets.
X_train, X_test, y_train, y_test = train_test_split(X,y, random_state=1)
# Import StandardScaler() function and scale X_train and X_test datasets.
scaler = StandardScaler()
X_scaler = scaler.fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# Import LR and instantiate a model.
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
testing_predictions = lr_model.predict(X_test)
# Import and print accuracy report.
accuracy = accuracy_score(y_test, testing_predictions)
res = round(accuracy,1) * 100
st.subheader(f"Accuracy of the Logistic Regression Model is {res} %")
st.write(accuracy)
# Import and print classification report.
training_report = classification_report(y_test, testing_predictions)
res_1 = training_report
st.subheader(f"{res_1}")
testing_matrix = confusion_matrix(y_test, testing_predictions)
# Print confusion matrix.
st.subheader(f"Confusion Matrix")
st.text("The Confusion Matrix is a table that allows visualization of the performance of an algorithm")
st.caption("Predicted True / Actually True Predicted False / Actually True")
st.caption("Predicted False / Actually True Predicted False / Actually False")
st.write(testing_matrix)
# Comments on Conclusion ect.
st.subheader("Conclusions")
st.text("""
For this Logistic Regression Model I used a 4 & 100 day Moving Average as the feature, and a buy and sell signal
as the target. The Signals were defined as: days where | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WeatherOperations(object):
"""WeatherOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Wire up the pipeline client, configuration and (de)serializers.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def list(
    self,
    farmer_id,  # type: str
    boundary_id,  # type: str
    extension_id,  # type: str
    weather_data_type,  # type: str
    granularity,  # type: str
    start_date_time=None,  # type: Optional[datetime.datetime]
    end_date_time=None,  # type: Optional[datetime.datetime]
    max_page_size=50,  # type: Optional[int]
    skip_token=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.WeatherDataListResponse"]
    """Returns a paginated list of weather data.

    :param farmer_id: Farmer ID.
    :type farmer_id: str
    :param boundary_id: Boundary ID.
    :type boundary_id: str
    :param extension_id: ID of the weather extension.
    :type extension_id: str
    :param weather_data_type: Type of weather data (forecast/historical).
    :type weather_data_type: str
    :param granularity: Granularity of weather data (daily/hourly).
    :type granularity: str
    :param start_date_time: Weather data start UTC date-time (inclusive), sample format:
     yyyy-MM-ddTHH:mm:ssZ.
    :type start_date_time: ~datetime.datetime
    :param end_date_time: Weather data end UTC date-time (inclusive), sample format:
     yyyy-MM-ddTHH:mm:ssZ.
    :type end_date_time: ~datetime.datetime
    :param max_page_size: Maximum number of items needed (inclusive).
     Minimum = 10, Maximum = 1000, Default value = 50.
    :type max_page_size: int
    :param skip_token: Skip token for getting next set of results.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WeatherDataListResponse or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.WeatherDataListResponse]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WeatherDataListResponse"]
    # Map auth/404/409 status codes onto azure-core exceptions; callers may
    # extend the mapping through the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-31-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build one page's HTTP request: the first page is assembled from the
        # method parameters; follow-up pages reuse the service-provided
        # next_link, which already encodes the query string.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters (serializer also validates pattern/length/range constraints)
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['farmerId'] = self._serialize.query("farmer_id", farmer_id, 'str')
            query_parameters['boundaryId'] = self._serialize.query("boundary_id", boundary_id, 'str')
            query_parameters['extensionId'] = self._serialize.query("extension_id", extension_id, 'str', pattern=r'^[A-za-z]{3,50}[.][A-za-z]{3,100}$')
            query_parameters['weatherDataType'] = self._serialize.query("weather_data_type", weather_data_type, 'str', max_length=50, min_length=0)
            query_parameters['granularity'] = self._serialize.query("granularity", granularity, 'str', max_length=50, min_length=0)
            if start_date_time is not None:
                query_parameters['startDateTime'] = self._serialize.query("start_date_time", start_date_time, 'iso-8601')
            if end_date_time is not None:
                query_parameters['endDateTime'] = self._serialize.query("end_date_time", end_date_time, 'iso-8601')
            if max_page_size is not None:
                query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
            if skip_token is not None:
                query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Split one page response into (next_link, iterator of items).
        deserialized = self._deserialize('WeatherDataListResponse', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Execute one page request; raise HttpResponseError for non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response
    # ItemPaged lazily drives get_next/extract_data as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/weather'}  # type: ignore
def get_data_ingestion_job_details(
    self,
    job_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.WeatherDataIngestionJob"
    """Get weather ingestion job.

    :param job_id: ID of the job.
    :type job_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: WeatherDataIngestionJob, or the result of cls(response)
    :rtype: ~azure.agrifood.farming.models.WeatherDataIngestionJob
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WeatherDataIngestionJob"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-31-preview"
    accept = "application/json"
    # Expand endpoint and job id into the templated URL.
    path_args = {
        'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'jobId': self._serialize.url("job_id", job_id, 'str'),
    }
    url = self._client.format_url(self.get_data_ingestion_job_details.metadata['url'], **path_args)  # type: ignore
    # Query string and headers for the GET request.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)
    deserialized = self._deserialize('WeatherDataIngestionJob', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_data_ingestion_job_details.metadata = {'url': '/weather/ingest-data/{jobId}'}  # type: ignore
def _create_data_ingestion_job_initial(
    self,
    job_id,  # type: str
    job=None,  # type: Optional["_models.WeatherDataIngestionJob"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.WeatherDataIngestionJob"
    """Issue the initial PUT that starts a weather data ingestion job.

    Internal helper for ``begin_create_data_ingestion_job``; returns the
    202 response body deserialized as WeatherDataIngestionJob.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WeatherDataIngestionJob"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-31-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._create_data_ingestion_job_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'jobId': self._serialize.url("job_id", job_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the optional job payload; an absent job sends an empty body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    if job is not None:
        body_content = self._serialize.body(job, 'WeatherDataIngestionJob')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Only 202 Accepted is expected for the initial request of this LRO.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)
    deserialized = self._deserialize('WeatherDataIngestionJob', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_data_ingestion_job_initial.metadata = {'url': '/weather/ingest-data/{jobId}'}  # type: ignore
def begin_create_data_ingestion_job(
    self,
    job_id,  # type: str
    job=None,  # type: Optional["_models.WeatherDataIngestionJob"]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.WeatherDataIngestionJob"]
    """Create a weather data ingestion job.

    :param job_id: Job id supplied by user.
    :type job_id: str
    :param job: Job parameters supplied by user.
    :type job: ~azure.agrifood.farming.models.WeatherDataIngestionJob
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be LROBasePolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either WeatherDataIngestionJob or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.agrifood.farming.models.WeatherDataIngestionJob]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WeatherDataIngestionJob"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Without a continuation token, fire the initial PUT now; with one, the
    # poller is resumed from saved state below and raw_result is never needed.
    if cont_token is None:
        raw_result = self._create_data_ingestion_job_initial(
            job_id=job_id,
            job=job,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call and must not leak into polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final polling response into the job model.
        deserialized = self._deserialize('WeatherDataIngestionJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'jobId': self._serialize.url("job_id", job_id, 'str'),
    }
    # Polling strategy: default LRO base polling (final state read via the
    # 'location' header), no polling, or a caller-supplied polling method.
    if polling is True: polling_method = LROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_data_ingestion_job.metadata = {'url': '/weather/ingest-data/{jobId}'}  # type: ignore
def get_data_delete_job_details(
self,
job_id, | |
n = {"Bonds": 2, "Angles": 3, "Dihedrals": 4, "Impropers": 4}
parse_line = lambda l: {"type": int(l[1]), kw[:-1].lower():
[int(x) for x in l[2:n[kw] + 2]]}
elif kw == "Atoms":
keys = ATOMS_LINE_FORMAT[atom_style][:]
sample_l = single_section_lines[1].split()
if len(sample_l) == len(keys) + 1:
pass
elif len(sample_l) == len(keys) + 4:
keys += ["nx", "ny", "nz"]
else:
warnings.warn("Atoms section format might be imcompatible"
" with atom_style %s." % atom_style)
float_keys = [k for k in keys if k in ATOMS_FLOATS]
parse_line = lambda l: {k: float(v) if k in float_keys
else int(v) for (k, v) in zip(keys, l[1:len(keys) + 1])}
elif kw == "Velocities":
parse_line = lambda l: {"velocity": [float(x)
for x in l[1:4]]}
elif kw == "Masses":
parse_line = lambda l: {"mass": float(l[1])}
else:
warnings.warn("%s section parser has not been implemented. "
"Skipping..." % kw)
return kw, []
section = []
splitted_lines = [l.split() for l in single_section_lines[1:]]
if sort_id and kw != "Pair<NAME>":
splitted_lines = sorted(splitted_lines,
key=lambda l: int(l[0]))
for l in splitted_lines:
line_data = parse_line(l)
if kw != "Pair<NAME>":
line_data["id"] = int(l[0])
section.append(line_data)
return kw, section
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if name in ["Velocities"] + topo_sections and not seen_atoms:
raise RuntimeError(err_msg + "%s section appears before"
" Atoms section" % name)
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], \
err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] \
if body.get("Velocities") else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], \
err_msg.format("atoms", s)
for s in topo_sections:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], \
err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["box_bounds"] = header["bounds"]
items["box_tilt"] = header.get("tilt")
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body.keys() if k in SECTION_KEYWORDS["ff"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws \
else None
topo_kws = [k for k in body.keys()
if k in SECTION_KEYWORDS["molecule"]]
items["topology"] = {k: body[k] for k in topo_kws} \
if topo_kws else None
items["atom_style"] = atom_style
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, ff, topologies, box_bounds, box_tilt=None,
                           atom_style="full"):
    """
    Constructor building LammpsData from a ForceField object and a
    list of Topology objects.

    Args:
        ff (ForceField): ForceField object with data for Masses and
            force field sections.
        topologies ([Topology]): List of Topology objects with data
            for Atoms, Velocities and topology sections.
        box_bounds: A (3, 2) array/list of floats setting the
            boundaries of simulation box.
        box_tilt: A (3,) array/list of floats setting the tilt of
            simulation box. Default to None, i.e., use an
            orthogonal box.
        atom_style (str): Output atom_style. Default to "full".
    """
    # Every atom type used by any topology must be known to the force field.
    atom_types = set(itertools.chain(*[t.types for t in topologies]))
    assert atom_types.issubset(ff.atom_map.keys()),\
        "Unknown atom type found in topologies"
    items = {"box_bounds": box_bounds, "box_tilt": box_tilt,
             "atom_style": atom_style}
    items["masses"] = ff.masses
    # lookup maps (per section) a type label/tuple to its numeric type id.
    lookup = {"Atoms": ff.atom_map}
    pair_coeffs = ff.get_pair_coeffs()
    mol_coeffs = getattr(ff, "mol_coeffs")
    force_field = {} if any((pair_coeffs, mol_coeffs)) else None
    if pair_coeffs:
        force_field.update(pair_coeffs)
    if mol_coeffs:
        for kw in mol_coeffs.keys():
            coeffs, mapper = ff.get_coeffs_and_mapper(kw)
            force_field.update(coeffs)
            # "Bond Coeffs" -> "Bonds", "Angle Coeffs" -> "Angles", etc.
            lookup[kw[:-7] + "s"] = mapper
    items["force_field"] = force_field
    atoms = []
    # Velocities are emitted only if the first topology carries them;
    # assumes all topologies are consistent in this respect — TODO confirm.
    velocities = [] if topologies[0].velocities else None
    topology = {k: [] for k in SECTION_KEYWORDS["molecule"]}
    # Running id offsets per section, so ids stay globally consecutive
    # while concatenating multiple molecules.
    stack = {k: 0 for k in ["Atoms"] + SECTION_KEYWORDS["molecule"]}
    atom_format = ATOMS_LINE_FORMAT[atom_style]
    for mid, topo in enumerate(topologies):
        # Translate local site indices to their type labels for lookup.
        map_inds = lambda inds: tuple([topo.types[i] for i in inds])
        topo_atoms = []
        for aid, (s, t) in enumerate(zip(topo.sites, topo.types)):
            d_atom = {"id": aid + 1 + stack["Atoms"],
                      "type": lookup["Atoms"][t]}
            d_atom.update({k: getattr(s, k) for k in "xyz"})
            if "molecule-ID" in atom_format:
                d_atom["molecule-ID"] = mid + 1
            topo_atoms.append(d_atom)
        if "q" in atom_format:
            # Default to neutral atoms when no charges are supplied.
            charges = [0.0] * len(topo.sites) if not topo.charges \
                else topo.charges
            for d_atom, q in zip(topo_atoms, charges):
                d_atom["q"] = q
        atoms.extend(topo_atoms)
        if isinstance(velocities, list):
            velocities.extend({"id": aid + 1 + stack["Atoms"],
                               "velocity": v}
                              for aid, v in enumerate(topo.velocities))
        if topo.topologies:
            for kw in topo.topologies.keys():
                topo_lookup = lookup[kw]
                unfiltered_indices = np.array(topo.topologies[kw])
                topo_topos = []
                tid = stack[kw]
                for inds in unfiltered_indices:
                    # Entries whose type tuple is absent from the force
                    # field are silently dropped.
                    topo_type = topo_lookup.get(map_inds(inds))
                    if topo_type:
                        topo_inds = list(inds + stack["Atoms"] + 1)
                        topo_topos.append({"id": tid + 1,
                                           "type": topo_type,
                                           kw.lower()[:-1]: topo_inds})
                        tid += 1
                topology[kw].extend(topo_topos)
                stack[kw] = tid
        stack["Atoms"] += len(topo_atoms)
    # Drop empty sections; a fully empty topology becomes None.
    topology = {k: v for k, v in topology.items() if len(v) > 0}
    topology = None if len(topology) == 0 else topology
    items.update({"atoms": atoms, "velocities": velocities,
                  "topology": topology})
    return cls(**items)
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE single SiteCollection or its subclasses
(Molecule/Structure), or a plain list of Sites.
"""
def __init__(self, sites, atom_type=None, charges=None, velocities=None,
             topologies=None):
    """
    Args:
        sites ([Site] or SiteCollection): Sites as a plain list or as
            a Molecule/Structure. A plain list is converted to a
            Molecule internally.
        atom_type (str): Site property key used to label atoms of
            different types. Default to None, i.e., use
            site.species_string.
        charges ([q, ...]): Per-site charges as a (n,) array/list,
            where n is the No. of sites. Default to None, i.e.,
            taken from the "charge" site property when present.
        velocities ([[vx, vy, vz], ...]): Per-site velocities as a
            (n, 3) array/list, where n is the No. of sites. Default
            to None, i.e., taken from the "velocities" site property
            when present.
        topologies (dict): Bonds, angles, dihedrals and improper
            dihedrals defined by site indices, under any of the keys
            "Bonds", "Angles", "Dihedrals" and "Impropers". Default
            to None, i.e., no additional topology.
    """
    # Promote a bare list of sites to a Molecule.
    if not isinstance(sites, SiteCollection):
        sites = Molecule.from_sites(sites)
    types = (sites.site_properties.get(atom_type) if atom_type
             else [site.species_string for site in sites])
    # Fall back to site properties when values were not given explicitly.
    if charges is None:
        charges = sites.site_properties.get("charge")
    if velocities is None:
        velocities = sites.site_properties.get("velocities")
    # Validate shapes and normalize to plain lists.
    if charges is not None:
        arr = np.array(charges)
        assert arr.shape == (len(sites),),\
            "Wrong format for charges"
        charges = arr.tolist()
    if velocities is not None:
        arr = np.array(velocities)
        assert arr.shape == (len(sites), 3), \
            "Wrong format for velocities"
        velocities = arr.tolist()
    # Keep only recognized topology keys, in canonical section order.
    if topologies:
        topologies = {k: topologies[k]
                      for k in SECTION_KEYWORDS["molecule"]
                      if k in topologies}
    self.sites = sites
    self.atom_type = atom_type
    self.types = types
    self.charges = charges
    self.velocities = velocities
    self.topologies = topologies
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
atom_type=None, charges=None, velocities=None, tol=0.1):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether find angles. Default to True.
dihedral (bool): Whether find dihedrals. Default to True.
atom_type (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2]))
for b in real_bonds]
if not all((bond, bond_list)):
return cls(sites=molecule, atom_type=atom_type, charges=charges,
velocities=velocities)
else:
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)]
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = list(np.unique(bond_arr[ix]))
bonds.remove(hub)
hub_spokes[hub] = bonds
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, | |
elif token in ["vmt", "vdt"]:
dy = argList[0]
curX += dx
curY += dy
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {
"x": f"{showX}", "y": f"{showY}", "type": opName})
if opName == "move":
if outlineItem is not None:
if len(outlineItem) == 1:
# Just in case we see 2 moves in a row, delete the
# previous outlineItem if it has only the move-to
print("Deleting moveto: %s adding %s" % (
xmlToString(newOutline[-1]),
xmlToString(outlineItem)))
del newOutline[-1]
else:
# Fix the start/implied end path of the
# previous path.
fixStartPoint(outlineItem, opList)
opList = []
outlineItem = XMLElement('contour')
newOutline.append(outlineItem)
if newHintMaskName is not None:
newPoint.set(kPointName, newHintMaskName)
newHintMaskName = None
outlineItem.append(newPoint)
opList.append([opName, curX, curY])
else:
if token in ["ct", "cv"]:
curX = argList[0]
curY = argList[1]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX = argList[2]
curY = argList[3]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX = argList[4]
curY = argList[5]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {
"x": f"{showX}", "y": f"{showY}", "type": opName})
outlineItem.append(newPoint)
else:
if token in ["rct", "rcv"]:
curX += argList[0]
curY += argList[1]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[2]
curY += argList[3]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[4]
curY += argList[5]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {
"x": f"{showX}",
"y": f"{showY}",
"type": opName})
outlineItem.append(newPoint)
elif token == "vhct":
curY += argList[0]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[1]
curY += argList[2]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[3]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {
"x": f"{showX}",
"y": f"{showY}",
"type": opName})
outlineItem.append(newPoint)
elif token == "hvct":
curX += argList[0]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curX += argList[1]
curY += argList[2]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {"x": "%s" % showX, "y": "%s" % showY})
outlineItem.append(newPoint)
curY += argList[3]
showX, showY = convertCoords(curX, curY)
newPoint = XMLElement(
"point", {
"x": f"{showX}",
"y": f"{showY}",
"type": opName})
outlineItem.append(newPoint)
if newHintMaskName is not None:
# attach the pointName to the first point of the curve.
outlineItem[-3].set(kPointName, newHintMaskName)
newHintMaskName = None
opList.append([opName, curX, curY])
argList = []
if outlineItem is not None:
if len(outlineItem) == 1:
# Just in case we see two moves in a row, delete the previous
# outlineItem if it has zero length.
del newOutline[-1]
else:
fixStartPoint(outlineItem, opList)
# add hints, if any
# Must be done at the end of op processing to make sure we have seen
# all the hints in the bez string.
# Note that the hintmasks are identified in the opList by the point name.
# We will follow the T1 spec: a glyph may have stem3 counter hints or
# regular hints, but not both.
if (seenHints) or (len(flexList) > 0):
hintInfoDict = XMLElement("dict")
hintSetListItem = XMLElement("key")
hintSetListItem.text = kHintSetListName
hintInfoDict.append(hintSetListItem)
hintSetListArray = XMLElement("array")
hintInfoDict.append(hintSetListArray)
# Convert the rest of the hint masks to a hintmask op
# and hintmask bytes.
for hintMask in hintMaskList:
hintMask.addHintSet(hintSetListArray)
if len(flexList) > 0:
hintSetListItem = XMLElement("key")
hintSetListItem.text = kFlexIndexListName
hintInfoDict.append(hintSetListItem)
flexArray = XMLElement("array")
hintInfoDict.append(flexArray)
addFlexHint(flexList, flexArray)
# JH 24 Sep 2019
# hash now goes at end of glyphDict to match psautohint
idItem = XMLElement("key")
idItem.text = "id"
hintInfoDict.append(idItem)
idString = XMLElement("string")
idString.text = kHashIdPlaceholder
hintInfoDict.append(idString)
return newOutline, hintInfoDict
def addHintList(hints, hintsStem3, newHintSetArray, isH):
    """
    Append one hint set's stem hints to newHintSetArray.

    A charstring may have regular vstem hints or vstem3 hints, but not
    both (same for hstem hints vs hstem3 hints), so stem3 hints take
    precedence whenever they are present.
    """
    # Each stem hint consumes two stack slots; reserve two slots for the
    # operator itself. NOTE(review): assumes kStackLimit is a positive
    # int, in which case (x - 2) // 2 equals the original int((x - 2) / 2).
    hintLimit = (kStackLimit - 2) // 2
    if len(hintsStem3) > 0:
        hintsStem3.sort()
        if len(hintsStem3) >= hintLimit:
            # Truncate to what fits on the interpreter stack.
            hintsStem3 = hintsStem3[:hintLimit]
        makeStemHintList(hintsStem3, newHintSetArray, isH)
    else:
        hints.sort()
        if len(hints) >= hintLimit:
            hints = hints[:hintLimit]
        makeHintList(hints, newHintSetArray, isH)
def addWhiteSpace(parent, level):
    """
    Recursively set the .text/.tail members of an ElementTree-style
    element tree so that serializing it yields indented XML, one
    indent unit per nesting level.
    """
    child = None
    childIndent = '\n' + (" " * (level + 1))
    prentIndent = '\n' + (" " * (level))
    # print("parent Tag", parent.tag, repr(parent.text), repr(parent.tail))
    for child in parent:
        # Every child's tail starts a new line at the child indent level.
        child.tail = childIndent
        addWhiteSpace(child, level + 1)
    if child is not None:
        # `child` is now the LAST child iterated. parent.text indents the
        # first child; the last child's tail is dedented so the parent's
        # closing tag lines up with its opening tag.
        if parent.text is None:
            parent.text = childIndent
        child.tail = prentIndent
    # print("lastChild Tag", child.tag, repr(child.text),
    #       repr(child.tail), "parent Tag", parent.tag)
def convertBezToGLIF(ufoFontData, glyphName, bezString, hintsOnly=False):
    """
    Rebuild the GLIF XML for glyphName from a bez string.

    Reads the glyph's source GLIF file, replaces its <outline> element
    with contours converted from bezString (unless hintsOnly is True),
    stores the converted hint data plus a hash of the new outline under
    the glyph's <lib>, and returns the updated GLIF XML tree.
    """
    # I need to replace the contours with data from the bez string.
    glyphPath = ufoFontData.getGlyphSrcPath(glyphName)
    with open(glyphPath, "rb") as fp:
        data = fp.read()
    glifXML = XML(data)
    outlineItem = None
    libIndex = outlineIndex = -1
    childIndex = 0
    # Locate the existing <outline> and <lib> children (if any) so we
    # know where to splice in the replacements below.
    for childElement in glifXML:
        if childElement.tag == "outline":
            outlineItem = childElement
            outlineIndex = childIndex
        if childElement.tag == "lib":
            libIndex = childIndex
        childIndex += 1
    newOutlineElement, hintInfoDict = convertBezToOutline(
        ufoFontData, glyphName, bezString)
    # print xmlToString(stemHints)
    if not hintsOnly:
        if outlineItem is None:
            # need to add it. Add it before the lib item, if any.
            if libIndex > 0:
                glifXML.insert(libIndex, newOutlineElement)
            else:
                glifXML.append(newOutlineElement)
        else:
            # remove the old one and add the new one.
            glifXML.remove(outlineItem)
            glifXML.insert(outlineIndex, newOutlineElement)
    # convertBezToGLIF is called only if the GLIF has been edited by a tool.
    # We need to update the edit status in the hash map entry.
    # I assume that convertGLIFToBez has been run before, which will add an
    # entry for this glyph.
    ufoFontData.updateHashEntry(glyphName, changed=True)
    # Add the stem hints.
    if hintInfoDict is not None:
        widthXML = glifXML.find("advance")
        if widthXML is not None:
            width = int(ast.literal_eval(widthXML.get("width", '0')))
        else:
            width = 0
        useDefaultGlyphDir = False
        newGlyphHash, _ = ufoFontData.buildGlyphHashValue(
            width, newOutlineElement, glyphName, useDefaultGlyphDir)
        # We add this hash to the T1 data, as it is the hash which matches
        # the output outline data. This is not necessarily the same as the
        # hash of the source data - autohint can be used to change outlines.
        if libIndex > 0:
            libItem = glifXML[libIndex]
        else:
            libItem = XMLElement("lib")
            glifXML.append(libItem)
        dictItem = libItem.find("dict")
        if dictItem is None:
            dictItem = XMLElement("dict")
            libItem.append(dictItem)
        # Remove any existing hint data. The lib dict stores alternating
        # <key>/<data> pairs: childList[i] is the element immediately
        # after the matched key, i.e. its data item.
        # NOTE(review): if a hint-domain key were ever the LAST child,
        # childList[i] would raise IndexError — confirm keys are always
        # followed by a data element.
        i = 0
        childList = list(dictItem)
        for childItem in childList:
            i += 1
            if (childItem.tag == "key") and (
                    (childItem.text == kHintDomainName1) or
                    (childItem.text == kHintDomainName2)):
                dictItem.remove(childItem)  # remove key
                dictItem.remove(childList[i])  # remove data item.
        glyphDictItem = dictItem
        key = XMLElement("key")
        key.text = kHintDomainName2
        glyphDictItem.append(key)
        glyphDictItem.append(hintInfoDict)
        # As of September, 2019, the hash should be at the end of the glyph
        # dict, so we iterate backwards from the end until we find the
        # placeholder, then set to newGlyphHash
        childList = list(hintInfoDict)
        for child in childList[::-1]:
            if getattr(child, 'text', "") == kHashIdPlaceholder:
                child.text = newGlyphHash
                break
    addWhiteSpace(glifXML, 0)
    return glifXML
def _get_glyph_width(glyph):
    """Return the advance width of *glyph*, defaulting to 0."""
    # NOTE(review): the HashPointPen is created and drawn into but then
    # discarded; presumably drawPoints() has the side effect of forcing
    # the glyph's data (including width) to be loaded — TODO confirm.
    hash_pen = HashPointPen(glyph)
    glyph.drawPoints(hash_pen)
    # Fall back to 0 when the glyph object carries no width attribute.
    return getattr(glyph, 'width', 0)
def regenerate_glyph_hashes(ufo_font_data):
    """
    Recompute and store the hash for every glyph that already has a
    hash-map entry, using the default-layer outline data.

    The handling of the glyph hashes is super convoluted.
    This method fixes https://github.com/adobe-type-tools/afdko/issues/349
    """
    for glyph_name, glyph_file in ufo_font_data.getGlyphMap().items():
        width, _, outline_xml = ufo_font_data.getGlyphXML(
            ufo_font_data.glyphDefaultDir, glyph_file)
        entry = ufo_font_data.hashMap.get(glyph_name, None)
        # Glyphs without an existing hash-map entry are left untouched.
        if not entry:
            continue
        new_hash, _ = ufo_font_data.buildGlyphHashValue(
            width, outline_xml, glyph_name, True)
        entry[0] = new_hash
def checkHashMaps(fontPath, doSync):
"""
Checks if the hashes of the glyphs in the default layer match the hash
values stored in the UFO's 'data/com.adobe.type.processedHashMap' file.
Returns a tuple of a boolean and a list. The boolean is True if all glyph
hashes matched. The list contains strings that report the glyph names
whose hash did not match.
If doSync is True, it will delete any glyph in the processed glyph
layer directory which does not have a matching glyph in the default
layer, or whose source | |
<reponame>nccreang/pyUSID<filename>examples/beginner/plot_hdf_utils_read.py
"""
================================================================================
05. Utilities for reading h5USID files
================================================================================
**<NAME>**
4/18/2018
**This document illustrates the many handy functions in pyUSID.hdf_utils that significantly simplify reading data
and metadata in Universal Spectroscopy and Imaging Data (USID) HDF5 files (h5USID files)**
"""
########################################################################################################################
# Introduction
# -------------
# The USID model uses a data-centric approach to data analysis and processing meaning that results from all data analysis
# and processing are written to the same h5 file that contains the recorded measurements. **Hierarchical Data Format
# (HDF5)** files allow data, whether it is raw measured data or results of analysis, to be stored in multiple datasets within
# the same file in a tree-like manner. Certain rules and considerations have been made in pyUSID to ensure
# consistent and easy access to any data.
#
# The h5py python package provides great functions to create, read, and manage data in HDF5 files. In
# ``pyUSID.hdf_utils``, we have added functions that facilitate scientifically relevant, or USID specific
# functionality such as checking if a dataset is a Main dataset, reshaping to / from the original N dimensional form of
# the data, etc. Due to the wide breadth of the functions in ``hdf_utils``, the guide for hdf_utils will be split in two
# parts - one that focuses on functions that facilitate reading and one that facilitate writing of data. The following
# guide provides examples of how, and more importantly when, to use functions in ``pyUSID.hdf_utils`` for various
# scenarios.
#
# Recommended pre-requisite reading
# ---------------------------------
# * `Universal Spectroscopic and Imaging Data (USID) model </../../../USID/usid_model.html>`_
# * `Crash course on HDF5 and h5py <./plot_h5py.html>`_
#
# .. tip::
# You can download and run this document as a Jupyter notebook using the link at the bottom of this page.
#
# Import all necessary packages
# -------------------------------
#
# Before we begin demonstrating the numerous functions in ``pyUSID.hdf_utils``, we need to import the necessary
# packages. Here are a list of packages besides pyUSID that will be used in this example:
#
# * ``h5py`` - to open and close the file
# * ``wget`` - to download the example data file
# * ``numpy`` - for numerical operations on arrays in memory
# * ``matplotlib`` - basic visualization of data
from __future__ import print_function, division, unicode_literals
import os
# Warning package in case something goes wrong
from warnings import warn
import subprocess
import sys
def install(package):
    """Install *package* with pip, using the current interpreter."""
    pip_cmd = [sys.executable, "-m", "pip", "install", package]
    subprocess.call(pip_cmd)
# Package for downloading online files:
try:
    # This package is not part of anaconda and may need to be installed.
    import wget
except ImportError:
    warn('wget not found. Will install with pip.')
    import pip
    # Fix: the original called install(wget), but the name `wget` is
    # unbound here (the import above just failed), so that line raised
    # NameError instead of installing. The package name must be a string.
    install('wget')
    import wget
import h5py
import numpy as np
import matplotlib.pyplot as plt
# Finally import pyUSID.
try:
    import pyUSID as usid
except ImportError:
    warn('pyUSID not found. Will install with pip.')
    # NOTE(review): `import pip` appears unused — install() shells out to
    # pip via subprocess rather than calling the pip module directly.
    import pip
    install('pyUSID')
    import pyUSID as usid
########################################################################################################################
# In order to demonstrate the many functions in hdf_utils, we will be using a h5USID file containing real
# experimental data along with results from analyses on the measurement data
#
# This scientific dataset
# -----------------------
#
# For this example, we will be working with a **Band Excitation Polarization Switching (BEPS)** dataset acquired from
# advanced atomic force microscopes. In the much simpler **Band Excitation (BE)** imaging datasets, a single spectrum is
# acquired at each location in a two dimensional grid of spatial locations. Thus, BE imaging datasets have two
# position dimensions (``X``, ``Y``) and one spectroscopic dimension (``Frequency`` - against which the spectrum is recorded).
# The BEPS dataset used in this example has a spectrum for **each combination of** three other parameters (``DC offset``,
# ``Field``, and ``Cycle``). Thus, this dataset has three new spectral dimensions in addition to ``Frequency``. Hence,
# this dataset becomes a 2+4 = **6 dimensional dataset**
#
# Load the dataset
# ------------------
# First, let us download this file from the pyUSID Github project:
url = 'https://raw.githubusercontent.com/pycroscopy/pyUSID/master/data/BEPS_small.h5'
h5_path = 'temp.h5'
_ = wget.download(url, h5_path, bar=None)
print('Working on:\n' + h5_path)
########################################################################################################################
# Next, lets open this HDF5 file in read-only mode. Note that opening the file does not cause the contents to be
# automatically loaded to memory. Instead, we are presented with objects that refer to specific HDF5 datasets,
# attributes or groups in the file
# Open the downloaded file read-only; h5py loads data lazily, so this
# only creates handles, it does not read the datasets into memory.
h5_path = 'temp.h5'
h5_f = h5py.File(h5_path, mode='r')
########################################################################################################################
# Here, ``h5_f`` is an active handle to the open file
#
# Inspect HDF5 contents
# ======================
#
# The file contents are stored in a tree structure, just like files on a contemporary computer. The file contains
# groups (similar to file folders) and datasets (similar to spreadsheets).
# There are several datasets in the file and these store:
#
# * The actual measurement collected from the experiment
# * Spatial location on the sample where each measurement was collected
# * Information to support and explain the spectral data collected at each location
# * Since the USID model stores results from processing and analyses performed on the data in the same h5USID file,
# these datasets and groups are present as well
# * Any other relevant ancillary information
#
# print_tree()
# ------------
# Soon after opening any file, it is often of interest to list the contents of the file. While one can use the open
# source software HDFViewer developed by the HDF organization, ``pyUSID.hdf_utils`` also has a very handy function -
# ``print_tree()`` to quickly visualize all the datasets and groups within the file within python.
print('Contents of the H5 file:')
usid.hdf_utils.print_tree(h5_f)
########################################################################################################################
# By default, ``print_tree()`` presents a clean tree view of the contents of the group. In this mode, only the group names
# are underlined. Alternatively, it can print the full paths of each dataset and group, with respect to the group / file
# of interest, by setting the ``rel_paths``
# keyword argument. ``print_tree()`` could also be used to display the contents of an HDF5 group instead of a complete HDF5
# file as we have done above. Lets configure it to print the relative paths of all objects within the ``Channel_000``
# group:
usid.hdf_utils.print_tree(h5_f['/Measurement_000/Channel_000/'], rel_paths=True)
########################################################################################################################
# Finally, ``print_tree()`` can also be configured to only print USID Main datasets besides Group objects using the
# ``main_dsets_only`` option
usid.hdf_utils.print_tree(h5_f, main_dsets_only=True)
########################################################################################################################
# Accessing Attributes
# ==================================
#
# HDF5 datasets and groups can also store metadata such as experimental parameters. These metadata can be text,
# numbers, small lists of numbers or text etc. These metadata can be very important for understanding the datasets
# and guide the analysis routines.
#
# While one could use the basic ``h5py`` functionality to access attributes, one would encounter a lot of problems when
# attempting to decode attributes whose values were strings or lists of strings due to some issues in ``h5py``. This problem
# has been demonstrated in our `primer to HDF5 and h5py <./plot_h5py.html>`_. Instead of using the basic functionality of ``h5py``, we recommend always
# using the functions in pyUSID that reliably and consistently work for any kind of attribute for any version of
# python:
#
# get_attributes()
# ----------------
#
# ``get_attributes()`` is a very handy function that returns all or a specified set of attributes in an HDF5 object. If no
# attributes are explicitly requested, all attributes in the object are returned:
for key, val in usid.hdf_utils.get_attributes(h5_f).items():
print('{} : {}'.format(key, val))
########################################################################################################################
# ``get_attributes()`` is also great for only getting selected attributes. For example, if we only cared about the user
# and project related attributes, we could manually request for any that we wanted:
proj_attrs = usid.hdf_utils.get_attributes(h5_f, ['project_name', 'project_id', 'user_name'])
for key, val in proj_attrs.items():
print('{} : {}'.format(key, val))
########################################################################################################################
# get_attr()
# ----------
#
# If we are sure that we only wanted a specific attribute, we could instead use ``get_attr()`` as:
print(usid.hdf_utils.get_attr(h5_f, 'user_name'))
########################################################################################################################
# check_for_matching_attrs()
# --------------------------
# Consider the scenario where we have several HDF5 files, Groups, or datasets and we want to check each one to
# see if they have the certain metadata / attributes. ``check_for_matching_attrs()`` is one very handy function that
# simplifies the comparison operation.
#
# For example, let us check if this file was authored by ``<NAME>``:
print(usid.hdf_utils.check_for_matching_attrs(h5_f, new_parms={'user_name': '<NAME>'}))
########################################################################################################################
# Finding datasets and groups
# ============================
#
# There are numerous ways to search for and access datasets and groups in H5 files using the basic functionalities
# of h5py. pyUSID.hdf_utils contains several functions that simplify common searching / lookup operations as part of
# scientific workflows.
#
# find_dataset()
# ----------------
#
# The ``find_dataset()`` function will return all datasets whose names contain the provided string. In this case, we
# | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for sonic_bgp_neighbors
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: sonic_bgp_neighbors
version_added: 1.0.0
notes:
- Tested against Enterprise SONiC Distribution by Dell Technologies.
- Supports C(check_mode).
short_description: Manage a BGP neighbor and its parameters
description:
- This module provides configuration management of global BGP_NEIGHBORS parameters on devices running Enterprise SONiC.
- bgp_as and vrf_name must be created on the device in advance.
author: <NAME> (@abirami-n)
options:
config:
description: Specifies the BGP neighbors related configuration.
type: list
elements: dict
suboptions:
bgp_as:
description:
- Specifies the BGP autonomous system (AS) number which is already configured on the device.
type: str
required: True
vrf_name:
description:
- Specifies the VRF name which is already configured on the device.
default: default
type: str
peer_group:
description: Specifies the list of peer groups.
type: list
elements: dict
suboptions:
name:
description: Name of the peer group.
type: str
required: True
remote_as:
description:
- Remote AS of the BGP peer group to configure.
- peer_as and peer_type are mutually exclusive.
type: dict
suboptions:
peer_as:
description:
- Specifies remote AS number.
- The range is from 1 to 4294967295.
type: int
peer_type:
description:
- Specifies the type of BGP peer.
type: str
choices:
- internal
- external
bfd:
description:
- Enables or disables BFD.
type: bool
advertisement_interval:
description:
- Specifies the minimum interval between sending BGP routing updates.
- The range is from 0 to 600.
type: int
timers:
description:
- Specifies BGP peer group timer related configurations.
type: dict
suboptions:
keepalive:
description:
- Frequency with which the device sends keepalive messages to its peer, in seconds.
- The range is from 0 to 65535.
type: int
holdtime:
description:
- Interval after not receiving a keepalive message that Enterprise SONiC declares a peer dead, in seconds.
- The range is from 0 to 65535.
type: int
capability:
description:
- Specifies capability attributes to this peer group.
type: dict
suboptions:
dynamic:
description:
- Enables or disables dynamic capability to this peer group.
type: bool
extended_nexthop:
description:
- Enables or disables advertise extended next-hop capability to the peer.
type: bool
address_family:
description:
- Holds of list of address families associated to the peergroup.
type: dict
suboptions:
afis:
description:
- List of address families with afi, safi, activate and allowas-in parameters.
- afi and safi are required together.
type: list
elements: dict
suboptions:
afi:
description:
- Holds afi mode.
type: str
choices:
- ipv4
- ipv6
- l2vpn
safi:
description:
- Holds safi mode.
type: str
choices:
- unicast
- evpn
activate:
description:
- Enable or disable activate.
type: bool
allowas_in:
description:
- Holds AS value.
- The origin and value are mutually exclusive.
type: dict
suboptions:
origin:
description:
- Set AS as the origin.
type: bool
value:
description:
- Holds AS number in the range 1-10.
type: int
neighbors:
description: Specifies BGP neighbor-related configurations.
type: list
elements: dict
suboptions:
neighbor:
description:
- Neighbor router address.
type: str
required: True
remote_as:
description:
- Remote AS of the BGP neighbor to configure.
- peer_as and peer_type are mutually exclusive.
type: dict
suboptions:
peer_as:
description:
- Specifies remote AS number.
- The range is from 1 to 4294967295.
type: int
peer_type:
description:
- Specifies the type of BGP peer.
type: str
choices:
- internal
- external
bfd:
description:
- Enables or disables BFD.
type: bool
advertisement_interval:
description:
- Specifies the minimum interval between sending BGP routing updates.
- The range is from 0 to 600.
type: int
peer_group:
description:
- The name of the peer group that the neighbor is a member of.
type: str
timers:
description:
- Specifies BGP neighbor timer-related configurations.
type: dict
suboptions:
keepalive:
description:
- Frequency with which the device sends keepalive messages to its peer, in seconds.
- The range is from 0 to 65535.
type: int
holdtime:
description:
- Interval after not receiving a keepalive message that SONiC declares a peer dead, in seconds.
- The range is from 0 to 65535.
type: int
capability:
description:
- Specifies capability attributes to this neighbor.
type: dict
suboptions:
dynamic:
description:
- Enables or disables dynamic capability to this neighbor.
type: bool
extended_nexthop:
description:
- Enables or disables advertise extended next-hop capability to the peer.
type: bool
state:
description:
- Specifies the operation to be performed on the BGP process that is configured on the device.
- In case of merged, the input configuration is merged with the existing BGP configuration on the device.
- In case of deleted, the existing BGP configuration is removed from the device.
default: merged
type: str
choices:
- merged
- deleted
"""
EXAMPLES = """
# Using deleted
#
# Before state:
# -------------
#router bgp 11 vrf VrfCheck2
# network import-check
# timers 60 180
#!
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
# !
# neighbor interface Eth1/3
#!
#router bgp 11
# network import-check
# timers 60 180
# !
# neighbor 192.168.1.4
# !
# peer-group SP1
# bfd
# capability dynamic
# !
# peer-group SP2
# !
#
- name: Deletes all BGP neighbors
dellemc.enterprise_sonic.sonic_bgp_neighbors:
config:
state: deleted
#
# After state:
# -------------
#router bgp 11 vrf VrfCheck2
# network import-check
# timers 60 180
#!
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
#!
#router bgp 11
# network import-check
# timers 60 180
# !
#
# Using merged
#
# Before state:
# ------------
#router bgp 11 vrf VrfCheck2
# network import-check
# timers 60 180
#!
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
#!
#router bgp 11
# network import-check
# timers 60 180
# !
- name: "Adds sonic_bgp_neighbors"
dellemc.enterprise_sonic.sonic_bgp_neighbors:
config:
- bgp_as: 51
vrf_name: VrfReg1
peer_group:
- name: SPINE
bfd: true
capability:
dynamic: true
extended_nexthop: true
remote_as:
peer_as: 4
address_family:
afis:
- afi: ipv4
safi: unicast
activate: true
allowas_in:
origin: true
- afi: ipv6
safi: unicast
activate: true
allowas_in:
value: 5
neighbors:
- neighbor: Eth1/3
remote_as:
peer_as: 10
peer_group: SPINE
advertisement_interval: 15
timers:
keepalive: 30
holdtime: 15
bfd: true
capability:
dynamic: true
extended_nexthop: true
- neighbor: 192.168.1.4
state: merged
#
# After state:
# ------------
#!
#router bgp 11 vrf VrfCheck2
# network import-check
# timers 60 180
#!
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
# !
# peer-group SPINE
# remote-as 4
# bfd
# capability dynamic
# capability extended-nexthop
# address-family ipv4 unicast
# activate
# allowas-in origin
# send-community both
# !
# address-family ipv6 unicast
# activate
# allowas-in 5
# send-community both
# !
# neighbor interface Eth1/3
# peer-group SPINE
# remote-as 10
# timers 15 30
# advertisement-interval 15
# bfd
# capability extended-nexthop
# capability dynamic
# !
# neighbor 192.168.1.4
#!
#router bgp 11
# network import-check
# timers 60 180
#
# Using deleted
#
# Before state:
# ------------
#!
#router bgp 11 vrf VrfCheck2
# network import-check
# timers 60 180
#!
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
# !
# peer-group SPINE
# bfd
# remote-as 4
# !
# neighbor interface Eth1/3
# peer-group SPINE
# remote-as 10
# timers 15 30
# advertisement-interval 15
# bfd
# capability extended-nexthop
# capability dynamic
# !
# neighbor 192.168.1.4
#!
#router bgp 11
# network import-check
# timers 60 18
# !
# peer-group SP
# !
# neighbor interface Eth1/3
#
- name: "Deletes sonic_bgp_neighbors and peer-groups specific to vrfname"
dellemc.enterprise_sonic.sonic_bgp_neighbors:
config:
- bgp_as: 51
vrf_name: VrfReg1
state: deleted
# After state:
# ------------
#!
#router bgp 11 vrf VrfCheck2
# network import-check
# timers 60 180
#!
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
# !
#router bgp 11
# network import-check
# timers 60 18
# !
# peer-group SP
# !
# neighbor interface Eth1/3
#
# Using deleted
#
# Before state:
# -------------
#
#router bgp 51 vrf VrfReg1
# network import-check
# timers 60 180
# !
# peer-group SPINE
# bfd
# remote-as 4
# !
# neighbor interface Eth1/3
# peer-group SPINE
# remote-as 10
# timers 15 30
# advertisement-interval 15
# bfd
# capability extended-nexthop
# capability dynamic
# !
# neighbor 192.168.1.4
# !
- name: "Deletes specific sonic_bgp_neighbors"
dellemc.enterprise_sonic.sonic_bgp_neighbors:
config:
- bgp_as: 51
vrf_name: VrfReg1
peer_group:
- name: SPINE
bfd: true
remote_as:
peer_as: 4
neighbors:
- neighbor: Eth1/3
remote_as:
peer_as: 10
peer_group: SPINE
advertisement_interval: 15
timers:
keepalive: 30
holdtime: 15
bfd: true
| |
0.0, 0.28870882, 0.0962085, np.nan, 2.09447737],
[-1.17001528, 0.0, 0.94512457, 0.92660979, np.nan, -3.59769363],
[-0.03628091, 0.0, 0.03747396, 0.42977612, np.nan, -1.40812549],
]
),
nan_ok=True,
)
assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
np.array(
[
[0.36932341, 1.0, 0.38975346, 0.18673365, np.nan, 0.183227951],
[0.35749992, 1.0, 0.38773156, 0.24760958, np.nan, 0.15238873],
[0.665790452, 1.0, 0.310576130, 0.490512577, np.nan, 0.31162633],
[0.762862615, 1.0, 0.773401832, 0.923578823, np.nan, 0.0398388806],
[0.24469499, 1.0, 0.34687333, 0.35672228, np.nan, 5.94507747e-04],
[0.971128594, 1.0, 0.970181808, 0.668433443, np.nan, 0.163519781],
]
),
nan_ok=True,
)
# Pruned (just columns) - with insertions
transforms = {"columns_dimension": {"prune": True}}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
assert slice_.pairwise_significance_t_stats(1) == pytest.approx(
np.array(
[
[0.90169345, 0.0, -0.86382097, -1.33091849, -1.34418779],
[-0.92428183, 0.0, 0.86752477, 1.1640832, 1.44689871],
[0.4331791, 0.0, -1.0191817, 0.6924624, -1.0191817],
[-0.30252414, 0.0, 0.28870882, 0.0962085, 2.09447737],
[-1.17001528, 0.0, 0.94512457, 0.92660979, -3.59769363],
[np.nan, np.nan, np.nan, np.nan, np.nan],
[-0.03628091, 0.0, 0.03747396, 0.42977612, -1.40812549],
]
),
nan_ok=True,
)
assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
np.array(
[
[0.369323414, 1.0, 0.389753462, 0.186733655, 0.18322795143],
[0.357499924, 1.0, 0.38773156, 0.24760958, 0.1523887297],
[0.66579045, 1.0, 0.31057612, 0.49051257, 0.3116263295],
[0.76286261, 1.0, 0.77340183, 0.92357882, 0.0398388806],
[0.24469499, 1.0, 0.346873337, 0.356722284, 0.00059450774],
[np.nan, np.nan, np.nan, np.nan, np.nan],
[0.97112859, 1.0, 0.97018180, 0.66843344, 0.1635197813],
]
),
nan_ok=True,
)
# Pruned (rows and columns) - with insertions
transforms = {
"rows_dimension": {"prune": True},
"columns_dimension": {"prune": True},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
assert slice_.pairwise_significance_t_stats(1) == pytest.approx(
np.array(
[
[0.90169345, 0.0, -0.86382097, -1.33091849, -1.34418779],
[-0.92428183, 0.0, 0.86752477, 1.1640832, 1.44689871],
[0.4331791, 0.0, -1.0191817, 0.6924624, -1.0191817],
[-0.30252414, 0.0, 0.28870882, 0.0962085, 2.09447737],
[-1.17001528, 0.0, 0.94512457, 0.92660979, -3.59769363],
[-0.03628091, 0.0, 0.03747396, 0.42977612, -1.40812549],
]
),
nan_ok=True,
)
assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
np.array(
[
[0.36932341401, 1.0, 0.38975346251, 0.18673365581, 0.18322795143],
[0.35749992438, 1.0, 0.3877315607, 0.2476095888, 0.1523887297],
[0.6657904524, 1.0, 0.3105761295, 0.4905125765, 0.3116263295],
[0.7628626149, 1.0, 0.7734018321, 0.9235788234, 0.0398388806],
[0.2446949971, 1.0, 0.34687333799, 0.35672228487, 0.00059450774],
[0.9711285942, 1.0, 0.9701818076, 0.6684334428, 0.1635197813],
]
),
nan_ok=True,
)
# Not pruned - with insertions
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT).partitions[0]
assert slice_.pairwise_significance_t_stats(1) == pytest.approx(
np.array(
[
[0.90169345, 0.0, -0.86382097, -1.33091849, np.nan, -1.34418779],
[-0.92428183, 0.0, 0.86752477, 1.1640832, np.nan, 1.44689871],
[0.4331791, 0.0, -1.0191817, 0.6924624, np.nan, -1.0191817],
[-0.30252414, 0.0, 0.28870882, 0.0962085, np.nan, 2.09447737],
[-1.17001528, 0.0, 0.94512457, 0.92660979, np.nan, -3.59769363],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[-0.03628091, 0.0, 0.03747396, 0.42977612, np.nan, -1.40812549],
]
),
nan_ok=True,
)
assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
np.array(
[
[0.369323414, 1.0, 0.389753462, 0.186733655, np.nan, 0.183227951],
[0.357499924, 1.0, 0.38773156, 0.24760958, np.nan, 0.15238872],
[0.66579045, 1.0, 0.31057612, 0.49051257, np.nan, 0.31162632],
[0.76286261, 1.0, 0.77340183, 0.92357882, np.nan, 0.03983888],
[0.24469499, 1.0, 0.346873337, 0.3567222848, np.nan, 0.0005945077],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[0.97112859, 1.0, 0.97018180, 0.66843344, np.nan, 0.16351978],
]
),
nan_ok=True,
)
def test_cat_hs_x_cat_hs_hiding_and_pruning_t_tests(self):
    """Pairwise t-stats honor hiding, pruning and explicit-order transforms.

    Column index 2 is the reference column for the t-stats, so its column is
    all 0.0, and it never appears as "significant vs itself" in the
    pairwise-indices tuples.
    """
    slice_ = Cube(
        CR.CAT_HS_X_CAT_HS_EMPTIES,
        transforms={
            "rows_dimension": {
                "elements": {"2": {"hide": True}},
                "prune": True,
                "order": {"type": "explicit", "element_ids": [0, 5, 2, 1, 4]},
            },
            "columns_dimension": {
                "elements": {"2": {"hide": True}},
                "prune": True,
                "order": {"type": "explicit", "element_ids": [4, 2, 5, 0]},
            },
        },
    ).partitions[0]
    # t-stats computed against column 2 (hence the zero column).
    assert slice_.pairwise_significance_t_stats(2) == pytest.approx(
        np.array(
            [
                [0.7768486, -3.15797218, 0.0, 1.31529737, -3.15797218],
                [1.3255319, 8.61476224, 0.0, 2.24083933, 8.61476224],
                [-0.56515582, 5.75456407, 0.0, -0.97693327, 5.75456407],
                [2.92481931, 0.52514588, 0.0, 4.02292482, 0.52514588],
                [-1.81141912, -3.17763277, 0.0, -3.28290825, -3.17763277],
                [-0.09211776, -4.20201154, 0.0, -0.16074485, -4.20201154],
                [0.54642458, 2.89882543, 0.0, 0.95666343, 2.89882543],
            ]
        )
    )
    # Per-row tuples of column indices that differ significantly at the
    # default alpha.
    assert slice_.pairwise_indices.tolist() == [
        [(1, 4), (), (1, 4), (1, 4), ()],
        [(), (0, 2, 3), (), (2,), (0, 2, 3)],
        [(), (0, 2, 3), (), (), (0, 2, 3)],
        [(1, 2, 4), (), (), (0, 1, 2, 4), ()],
        [(3,), (), (1, 3, 4), (), ()],
        [(1, 4), (), (1, 4), (1, 4), ()],
        [(), (0, 2), (), (), (0, 2)],
    ]
def test_cat_hs_subdiff_x_cat_pairwise_t_test(self):
    """Subtotal-difference rows reproduce the recorded t-stats and p-vals."""
    slice_ = Cube(CR.CAT_HS_SUBDIFF_X_CAT).partitions[0]
    assert slice_.pairwise_significance_t_stats(3) == pytest.approx(
        np.array(load_python_expression("cat-hs-subdiff-x-cat-pw-tstats"))
    )
    assert slice_.pairwise_significance_p_vals(3) == pytest.approx(
        np.array(load_python_expression("cat-hs-subdiff-x-cat-pw-pvals"))
    )
class TestOverlapsPairwiseSignificance:
    """Pairwise-significance behavior on cubes with overlapping MR subvariables.

    Each test pins t-stats / p-values computed against a chosen reference
    column (the integer argument to the ``pairwise_significance_*`` calls).
    The reference column's own t-stat is 0.0 by construction.
    """

    def test_pairwise_significance_cat_x_mr_sub_x_mr_sel_0th_subvar(self):
        """t-stats/p-vals with the 0th subvariable as reference column."""
        slice_ = Cube(OL.CAT_X_MR_SUB_X_MR_SEL).partitions[0]
        assert slice_.column_percentages.tolist() == [
            [0.0, 100.0, 100.0],
            [100.0, 0.0, 0.0],
        ]
        assert slice_.pairwise_significance_t_stats(0) == pytest.approx(
            np.array(
                [
                    [0.0, 3.11420549, 2.61911361],
                    [0.0, -3.11420549, -2.61911361],
                ]
            )
        )
        assert slice_.pairwise_significance_p_vals(0) == pytest.approx(
            np.array(
                [
                    [0.0, 0.05270861, 0.07906174],
                    [0.0, 0.05270861, 0.07906174],
                ]
            )
        )

    def test_pairwise_significance_cat_x_mr_sub_x_mr_sel_1st_subvar(self):
        """t-stats/p-vals with the 1st subvariable as reference column."""
        slice_ = Cube(OL.CAT_X_MR_SUB_X_MR_SEL).partitions[0]
        assert slice_.column_percentages.tolist() == [
            [0.0, 100.0, 100.0],
            [100.0, 0.0, 0.0],
        ]
        assert slice_.pairwise_significance_t_stats(1) == pytest.approx(
            np.array(
                [
                    [-3.11420549, 0.0, 0.0],
                    [3.11420549, 0.0, 0.0],
                ]
            )
        )
        assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
            np.array(
                [
                    [0.05270861, 0.0, 1.0],
                    [0.05270861, 0.0, 1.0],
                ]
            )
        )

    def test_pairwise_significance_cat_x_mr_sub_x_mr_sel_2nd_subvar(self):
        """t-stats/p-vals with the 2nd subvariable as reference column."""
        slice_ = Cube(OL.CAT_X_MR_SUB_X_MR_SEL).partitions[0]
        assert slice_.column_percentages.tolist() == [
            [0.0, 100.0, 100.0],
            [100.0, 0.0, 0.0],
        ]
        assert slice_.pairwise_significance_t_stats(2) == pytest.approx(
            np.array(
                [
                    [-2.61911361, 0.0, 0.0],
                    [2.61911361, 0.0, 0.0],
                ],
            ),
        )
        assert slice_.pairwise_significance_p_vals(2) == pytest.approx(
            np.array(
                [
                    [0.07906174, 1.0, 0.0],
                    [0.07906174, 1.0, 0.0],
                ],
            ),
        )

    def test_pairwise_significance_cat_x_mr_realistic_example(self):
        """Realistic CAT x MR fixture; NaN column is propagated, not dropped."""
        slice_ = Cube(OL.CAT_X_MR_REALISTIC_EXAMPLE).partitions[0]
        assert slice_.column_percentages == pytest.approx(
            np.array(
                [
                    [52.7687, 52.5926, 51.5504, 47.6852, 51.3889, np.nan],
                    [47.2313, 47.4074, 48.4496, 52.3148, 48.6111, np.nan],
                ]
            ),
            nan_ok=True,
        )
        assert slice_.pairwise_significance_t_stats(4) == pytest.approx(
            np.array(
                [
                    [1.00337549, 0.64382181, 0.0773666, -1.3677023, 0.0, np.nan],
                    [-1.00337549, -0.64382181, -0.0773666, 1.3677023, 0.0, np.nan],
                ],
            ),
            nan_ok=True,
        )
        assert slice_.pairwise_significance_p_vals(4) == pytest.approx(
            np.array(
                [
                    [0.31647509, 0.52017481, 0.93838264, 0.17241219, 0.0, np.nan],
                    [0.31647509, 0.52017481, 0.93838264, 0.17241219, 0.0, np.nan],
                ],
            ),
            nan_ok=True,
        )

    def test_pairwise_significance_mr_x_mr(self):
        """MR x MR slice: undefined comparisons come back as NaN."""
        slice_ = Cube(OL.MR_X_MR).partitions[0]
        assert slice_.column_percentages == pytest.approx(
            np.array(
                [
                    [100.0, 66.66667, np.nan],
                    [66.66667, 100.0, np.nan],
                    [0.0, 0.0, np.nan],
                ]
            ),
            nan_ok=True,
        )
        assert slice_.pairwise_significance_t_stats(1) == pytest.approx(
            np.array(
                [
                    [1.22474487, 0.0, np.nan],
                    [-1.22474487, np.nan, np.nan],
                    [np.nan, np.nan, np.nan],
                ],
            ),
            nan_ok=True,
        )
        assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
            np.array(
                [
                    [0.28786413, 1.0, np.nan],
                    [0.27521973, np.nan, np.nan],
                    [np.nan, np.nan, np.nan],
                ],
            ),
            nan_ok=True,
        )

    def test_pairwise_cat_x_mr_gender_x_all_pets_owned_with_weighted_counts(self):
        """Weighted-counts fixture checked against each subvariable column."""
        slice_ = Cube(OL.CAT_X_MR_GENDER_X_ALL_PETS_OWNED).partitions[0]
        assert slice_.column_percentages.tolist() == pytest.approx(
            np.array(
                [
                    [66.6667, 14.28571, 50.0],
                    [33.33333, 85.714286, 50.0],
                ]
            )
        )
        # Assert for first column (subvariable)
        assert slice_.pairwise_significance_t_stats(0).tolist() == pytest.approx(
            np.array(
                [
                    [0.0, -2.6315597, -1.76353],
                    [0.0, 2.6315597, 1.76353],
                ]
            ),
        )
        assert slice_.pairwise_significance_p_vals(0) == pytest.approx(
            np.array(
                [
                    [0.0, 0.01410448, 0.0879948],
                    [0.0, 0.01410448, 0.0879948],
                ]
            ),
        )
        # Assert for second column (subvariable)
        assert slice_.pairwise_significance_t_stats(1).tolist() == pytest.approx(
            np.array(
                [
                    [2.63156, 0.0, 8.10444],
                    [-2.63156, 0.0, -8.10444],
                ]
            ),
        )
        assert slice_.pairwise_significance_p_vals(1) == pytest.approx(
            np.array(
                [
                    [0.01410448, 0, 0.025067e-06],
                    [0.01410448, 0, 0.025067e-06],
                ]
            ),
        )
        # Assert for third column (subvariable)
        assert slice_.pairwise_significance_t_stats(2).tolist() == pytest.approx(
            np.array(
                [
                    [1.763531, -8.104439, 0.0],
                    [-1.763531, 8.104439, 0.0],
                ]
            ),
        )
        assert slice_.pairwise_significance_p_vals(2) == pytest.approx(
            np.array(
                [
                    [0.0879948, 0.025067e-06, 0],
                    [0.0879948, 0.025067e-06, 0],
                ]
            ),
        )

    def test_pairwise_significance_indices(self):
        """Two alpha levels: primary indices plus the looser alt indices."""
        transforms = {"pairwise_indices": {"alpha": [0.05, 0.13]}}
        slice_ = Cube(
            OL.CAT_X_MR_GENDER_X_ALL_PETS_OWNED, transforms=transforms
        ).partitions[0]
        assert slice_.column_percentages.tolist() == pytest.approx(
            np.array(
                [
                    [66.6667, 14.28571, 50.0],
                    [33.33333, 85.714286, 50.0],
                ]
            )
        )
        assert slice_.pairwise_indices.tolist() == [
            [(1,), (), (1,)],
            [(), (0, 2), ()],
        ]
        assert slice_.pairwise_indices_alt.tolist() == [
            [(1, 2), (), (1,)],
            [(), (0, 2), (0,)],
        ]

    def test_alt_pairwise_indices_without_alt_alpha(self):
        """No second alpha configured -> no alt indices."""
        slice_ = Cube(OL.CAT_X_MR_GENDER_X_ALL_PETS_OWNED).partitions[0]
        assert slice_.pairwise_indices_alt is None

    def test_pairwise_significance_all_empty(self):
        """A vanishingly small alpha yields all-empty index tuples."""
        # ---Keep the alpha value this small to demo the error found in cr.server
        transforms = {"pairwise_indices": {"alpha": [0.0000000001]}}
        slice_ = Cube(
            OL.CAT_X_MR_GENDER_X_ALL_PETS_OWNED, transforms=transforms
        ).partitions[0]
        assert slice_.column_percentages.tolist() == pytest.approx(
            np.array(
                [
                    [66.6667, 14.28571, 50.0],
                    [33.33333, 85.714286, 50.0],
                ]
            )
        )
        assert slice_.pairwise_indices.tolist() == [
            [(), (), ()],
            [(), (), ()],
        ]

    def test_pairwise_sig_for_realistic_example_mr_x_mr(self):
        """Realistic MR x MR t-stats, compared with a loose absolute tolerance."""
        slice_ = Cube(OL.MR_X_MR_REALISTIC_EXAMPLE).partitions[0]
        assert slice_.pairwise_significance_t_stats(3) == pytest.approx(
            np.array(
                [
                    [13.40, 9.62, -0.49, 0.0, 2.27, -24.08],
                    [-0.58, 19.94, -4.76, 0.0, -2.88, -13.48],
                    [-0.53, 0.70, 22.76, 0.0, 10.32, -19.63],
                    [-18.79, -13.57, -20.00, 0.0, -13.14, -29.10],
                    [-5.45, -0.99, -2.83, 0.0, 21.75, -12.29],
                    [-0.76, 0.17, -3.35, 0.0, -1.07, 31.68],
                ]
            ),
            abs=10e-2,
        )

    def test_pairwise_significance_indices_for_realistic_example_mr_x_mr(self):
        """Pairwise indices after hiding the first column element."""
        transforms = {"columns_dimension": {"elements": {"1": {"hide": True}}}}
        slice_ = Cube(OL.MR_X_MR_REALISTIC_EXAMPLE, transforms=transforms).partitions[0]
        assert slice_.pairwise_indices.tolist() == [
            [(0, 1, 2, 3, 4), (1, 2, 3, 4), (4,), (4,), (1, 2, 4)],
            [(1, 4), (1, 2, 3, 4), (4,), (1, 3, 4), (1, 4)],
            [(4,), (4,), (0, 2, 3, 4), (4,), (0, 2, 4)],
            [(4,), (1, 4), (4,), (0, 1, 3, 4), (1, 4)],
            [(4,), (1, 4), (4,), (1, 4), (0, 1, 2, 4)],
            [(1,), (1,), (), (1,), (1,)],
        ]

    def test_pairwise_sig_for_mr_x_mr_vs_mr_single_subvar_x_mr(self):
        """A single-subvariable MR behaves like the matching MR x MR row."""
        mr_x_mr_slice = Cube(OL.MR_X_MR).partitions[0]
        t_stats_mr_x_mr = mr_x_mr_slice.pairwise_significance_t_stats(1)
        mr_subvar_x_mr_slice = Cube(OL.MR_SINGLE_SUBVAR_X_MR).partitions[0]
        t_stats_mr_subvar_x_mr = mr_subvar_x_mr_slice.pairwise_significance_t_stats(1)
        # Assert same row stats are the same in both cases (MR x MR and MR_SEL x MR)
        np.testing.assert_array_equal(t_stats_mr_x_mr[0], t_stats_mr_subvar_x_mr[0])
class TestMeanDifferenceSignificance:
def test_mean_diff_significance_for_numeric_array_grouped_by_cat(self):
slice_ = Cube(NA.NUM_ARR_MULTI_NUMERIC_MEASURES_GROUPED_BY_CAT).partitions[0]
assert slice_.pairwise_significance_means_t_stats(0) == pytest.approx(
np.array(
[
[0.0, -0.32190273, -1.884166, -2.16152588],
[0.0, -1.91311986, -2.91790845, -1.50036042],
[0.0, 1.18770459, 2.59364411, 0.97704863],
]
)
)
assert slice_.pairwise_significance_means_p_vals(0) == pytest.approx(
np.array(
[
[1.0, 0.76970151, 0.17305508, 0.15932937],
[1.0, | |
schedule_id)
except Exception as e:
botengine.get_logger().warning("location.py - Error delivering schedule_fired to location microservice (continuing execution): " + str(e))
import traceback
botengine.get_logger().error(traceback.format_exc())
return
# Device intelligence modules
for device_id in self.devices:
if hasattr(self.devices[device_id], "intelligence_modules"):
for intelligence_id in self.devices[device_id].intelligence_modules:
try:
self.devices[device_id].intelligence_modules[intelligence_id].schedule_fired(botengine, schedule_id)
except Exception as e:
botengine.get_logger().warning("location.py - Error delivering schedule_fired to device microservice (continuing execution): " + str(e))
import traceback
botengine.get_logger().error(traceback.format_exc())
def timer_fired(self, botengine, argument):
    """
    Timer fired

    Intentionally a no-op at this level — the body only returns.
    NOTE(review): presumably timers are handled by microservices elsewhere;
    confirm before adding dispatch logic here.
    :param botengine: BotEngine environment
    :param argument: Optional argument
    """
    return
def file_uploaded(self, botengine, device_object, file_id, filesize_bytes, content_type, file_extension):
    """
    A device file has been uploaded.

    Each location microservice is notified in turn; one failing microservice
    is logged and skipped so the rest still receive the event.
    :param botengine: BotEngine environment
    :param device_object: Device object that uploaded the file
    :param file_id: File ID to reference this file at the server
    :param filesize_bytes: The file size in bytes
    :param content_type: The content type, for example 'video/mp4'
    :param file_extension: The file extension, for example 'mp4'
    """
    for microservice in self.intelligence_modules.values():
        try:
            microservice.file_uploaded(botengine, device_object, file_id, filesize_bytes, content_type, file_extension)
        except Exception as e:
            botengine.get_logger().warning("location.py - Error delivering file_uploaded to location microservice (continuing execution): " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())
def user_role_updated(self, botengine, user_id, category, location_access, previous_category, previous_location_access):
    """
    A user changed roles.

    Resynchronizes the user set, then fans the event out to the user object
    (if tracked), the location microservices, and every device microservice.
    Exceptions from any recipient are logged and execution continues.
    :param botengine: BotEngine environment
    :param user_id: User ID that changed roles
    :param category: User's current alert/communications category (1=resident; 2=supporter)
    :param location_access: User's access to the location and devices. (0=None; 10=read location/device data; 20=control devices and modes; 30=update location info and manage devices)
    :param previous_category: User's previous category, if any
    :param previous_location_access: User's previous access to the location, if any
    """
    self.synchronize_users(botengine)

    # Notify the user object, if we track this user.
    user_object = self.users.get(user_id)
    if user_object is not None:
        try:
            user_object.user_role_updated(botengine, user_id, category, location_access, previous_category, previous_location_access)
        except Exception as e:
            botengine.get_logger().warning("location.py - Error delivering user_role_updated to user object (continuing execution): " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())

    # Notify location microservices.
    for microservice in self.intelligence_modules.values():
        try:
            microservice.user_role_updated(botengine, user_id, category, location_access, previous_category, previous_location_access)
        except Exception as e:
            botengine.get_logger().warning("location.py - Error delivering user_role_updated to location microservice (continuing execution): " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())

    # Notify device microservices.
    for device_object in self.devices.values():
        for microservice in getattr(device_object, "intelligence_modules", {}).values():
            try:
                microservice.user_role_updated(botengine, user_id, category, location_access, previous_category, previous_location_access)
            except Exception as e:
                botengine.get_logger().warning("location.py - Error delivering user_role_updated to device microservice (continuing execution): " + str(e))
                import traceback
                botengine.get_logger().error(traceback.format_exc())
def call_center_updated(self, botengine, user_id, status):
    """
    Emergency call center status has changed.

    Fans the event out to location microservices, then to device
    microservices; a failing recipient is logged and skipped.
    :param botengine: BotEngine environment
    :param user_id: User ID that made the change
    :param status: Current call center status
    """
    # Location intelligence modules
    for microservice in self.intelligence_modules.values():
        try:
            microservice.call_center_updated(botengine, user_id, status)
        except Exception as e:
            botengine.get_logger().warning("location.py - Error delivering call_center_updated to device microservice (continuing execution): " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())

    # Device intelligence modules
    for device_object in self.devices.values():
        for microservice in getattr(device_object, "intelligence_modules", {}).values():
            try:
                microservice.call_center_updated(botengine, user_id, status)
            except Exception as e:
                botengine.get_logger().warning("location.py - Error delivering call_center_updated to device microservice (continuing execution): " + str(e))
                import traceback
                botengine.get_logger().error(traceback.format_exc())
def data_request_ready(self, botengine, reference, device_csv_dict):
    """
    A botengine.request_data() asynchronous request for CSV data is ready.

    Delivered to location microservices first, then to every device
    microservice; individual delivery failures are logged and skipped.
    :param botengine: BotEngine environment
    :param reference: Optional reference passed into botengine.request_data(..)
    :param device_csv_dict: { 'device_id': 'csv data string' }
    """
    # Location microservices
    for microservice in self.intelligence_modules.values():
        try:
            microservice.data_request_ready(botengine, reference, device_csv_dict)
        except Exception as e:
            botengine.get_logger().warning("location.py - Error delivering data_request_ready to location microservice : " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())

    # Device microservices
    for device_object in self.devices.values():
        for microservice in getattr(device_object, "intelligence_modules", {}).values():
            try:
                microservice.data_request_ready(botengine, reference, device_csv_dict)
            except Exception as e:
                botengine.get_logger().warning("location.py - Error delivering data_request_ready to device microservice : " + str(e))
                import traceback
                botengine.get_logger().error(traceback.format_exc())
def update_coordinates(self, botengine, latitude, longitude):
    """
    Attempt to update coordinates; microservices are only notified on change.
    :param botengine: BotEngine environment
    :param latitude: Current latitude
    :param longitude: Current longitude
    """
    # Added June 26, 2019 - older serialized objects may predate these fields.
    if not hasattr(self, 'latitude'):
        self.latitude = None
        self.longitude = None

    if self.latitude == latitude and self.longitude == longitude:
        # Nothing changed; skip the notifications.
        return

    self.latitude = latitude
    self.longitude = longitude
    for microservice in self.intelligence_modules.values():
        try:
            microservice.coordinates_updated(botengine, self.latitude, self.longitude)
        except Exception as e:
            botengine.get_logger().warning("location.py - Error delivering coordinates_updated to location microservice : " + str(e))
            import traceback
            botengine.get_logger().error(traceback.format_exc())
def get_microservice_by_id(self, microservice_id):
    """
    Get a microservice ("intelligence module") by its id ("intelligence_id").
    Sorry we named them 'intelligence modules' at first when they were really microservices.
    :param microservice_id: Microservice ID
    :return: Microservice object, or None if it doesn't exist.
    """
    # Location-level microservices first.
    for microservice in self.intelligence_modules.values():
        if microservice.intelligence_id == microservice_id:
            return microservice

    # Device-level microservices. Guard with hasattr like every other device
    # loop in this class: a device without microservices previously raised
    # AttributeError here instead of being skipped.
    for device_object in self.devices.values():
        if not hasattr(device_object, "intelligence_modules"):
            continue
        for microservice in device_object.intelligence_modules.values():
            if microservice.intelligence_id == microservice_id:
                return microservice
    return None
# ===========================================================================
# Location User
# ===========================================================================
def synchronize_users(self, botengine):
    """
    Synchronize our set of users.

    Creates User objects for newly seen users, refreshes the fields of every
    current user from the server's JSON, and destroys users that no longer
    exist at this location.
    :param botengine: Botengine environment
    """
    latest_users = botengine.get_location_users()
    current_ids = []
    for user_json in latest_users:
        user_id = user_json['id']
        current_ids.append(user_id)

        if user_id not in self.users:
            self.users[user_id] = User(botengine, user_json['id'])

        # Refresh every tracked field from the server's JSON.
        user_object = self.users[user_id]
        user_object.first_name = user_json.get('firstName', "")
        user_object.last_name = user_json.get('lastName', "")
        user_object.location_access = user_json.get('locationAccess', None)
        user_object.alert_category = user_json.get('category', None)
        user_object.language = 'en'
        user_object.location_object = self

    # Delete users that no longer exist
    for user_id in list(self.users.keys()):
        if user_id not in current_ids:
            self.users[user_id].destroy(botengine)
            del self.users[user_id]
def get_user(self, botengine, user_id):
    """
    Get user object by ID, resynchronizing once if the user is unknown.
    :param botengine: BotEngine environment
    :param user_id: User ID
    :return: User object, or None if it doesn't exist
    """
    user_object = self.users.get(user_id)
    if user_object is None:
        # Resynchronize and try one more time...
        self.synchronize_users(botengine)
        user_object = self.users.get(user_id)
    return user_object
#===========================================================================
# General location information
#===========================================================================
def get_location_name(self, botengine):
    """
    Get the nickname of this location.
    :param botengine: BotEngine environment
    :return: Nickname
    """
    nickname = botengine.get_location_name()
    return nickname
#===========================================================================
# Mode
#===========================================================================
def set_mode(self, botengine, mode, comment=None):
    """
    Set the mode for this location
    :param botengine: BotEngine environment
    :param mode: Mode to set (e.g. "HOME", "AWAY", "STAY", "TEST")
    :param comment: Optional comment about why the mode was set
    """
    botengine.set_mode(self.location_id, mode, comment)
    # Allow the bot to trigger again and set the mode from a single unified action.
def get_user_facing_mode(self, botengine, mode):
    """
    The modes recognized by most services include "HOME", "AWAY", "STAY", "TEST".
    The user-facing representation of these modes may be different. For example,
    some brands prefer the user to see "OFF" instead of "HOME".
    This method will transform a mode name into the user-facing representation
    for interaction with the user. Use the domain.py file at the root of your bot
    to specify this mapping.
    :param botengine: BotEngine environment
    :param mode: Internal mode name, including "HOME", "AWAY", "STAY", "TEST".
    :return: User-facing text representation of that mode
    """
    try:
        return properties.get_property(botengine, "USER_FACING_MODES")[mode]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit are
        # no longer swallowed; any lookup failure still falls back to the raw mode.
        botengine.get_logger().warning("location.py: Mode '{}' not found in domain.USER_FACING_MODES".format(mode))
        return mode
#===========================================================================
# Location Properties
#===========================================================================
def set_location_property(self, botengine, property_name, property_value, track=True):
    """
    Set a location property and publish the updated properties to the UI.
    :param botengine: BotEngine environment
    :param property_name: Property name
    :param property_value: Property value
    :param track: True to automatically copy these properties to the 3rd party analytics (default is True)
    """
    self._sync_location_properties(botengine)
    self.location_properties[property_name] = property_value
    botengine.set_ui_content('location_properties', self.location_properties)

    if not track:
        return

    import signals.analytics as analytics
    analytics.people_set(botengine, self, {property_name: property_value})
def update_location_properties(self, botengine, properties_dict, track=True):
    """
    Update multiple location properties simultaneously from a dictionary.
    If the properties don't exist yet, they will be added.
    :param botengine: BotEngine environment
    :param properties_dict: Properties dictionary with key/values to update
    :param track: True to automatically copy these properties to the 3rd party analytics (default is True)
    """
    self._sync_location_properties(botengine)
    for key, value in properties_dict.items():
        self.location_properties[key] = value
    botengine.set_ui_content('location_properties', self.location_properties)

    if track:
        import signals.analytics as analytics
        analytics.people_set(botengine, self, properties_dict)
def increment_location_property(self, botengine, property_name, increment_amount=1, track=True):
"""
Increment a location property integer by the amount given.
If the property doesn't exist, it will be initialized to 0 and then incremented by the amount given.
An existing property must be numeric to increment.
:param botengine: BotEngine environment
:param property_name: Property name to increment
:param increment_amount: Incremental amount to add (default is 1)
:param track: True to automatically copy these properties to the 3rd party analytics (default is True)
"""
self._sync_location_properties(botengine)
if property_name not in self.location_properties:
self.location_properties[property_name] = 0
self.location_properties[property_name] += | |
import os
import random
from datetime import date, timedelta
import pytest
from rivoli.compute import (
DayHistoricalRankEvent,
HistoricalRecordEvent,
HistoricalTotalEvent,
MonthRecordEvent,
MonthSummaryEvent,
MonthTotalEvent,
YearSummaryEvent,
YearTotalEvent,
_build_french_ordinal,
_capitalize_first_letter,
_compute_day_expression,
_compute_first_half_of_tweet,
_crosses_power_of_ten,
_cumulate,
_day_is_absolute_maximum,
_day_is_first_day_of_month,
_day_is_first_day_of_year,
_day_is_last_day_of_month,
_day_is_monthly_record,
_day_is_year_maximum,
_day_rank,
_extract_total_count,
_get_month,
_get_month_range,
_group_by_month,
_group_by_year,
_increments_first_digit,
_month_to_cumulate_sums,
_month_to_french,
_number_is_funny,
_optimistic_rank,
_prettify_number,
_round_to_twentieth,
_safe_get_count,
build_tweet,
day_is_today,
day_is_yesterday,
)
from rivoli.models import CountHistory, DayCount, Hashtag, Month
from rivoli.utils import parse_ymd
def test_day_is_today():
    """Only today's date is recognized as today."""
    today = date.today()
    assert day_is_today(today)
    for delta in (1, 365, -366):
        assert not day_is_today(today + timedelta(days=delta))
def test_day_is_yesterday():
    """Only the day before today is recognized as yesterday."""
    today = date.today()
    assert day_is_yesterday(today - timedelta(days=1))
    for delta in (0, 1, 365, -366):
        assert not day_is_yesterday(today + timedelta(days=delta))
def test_safe_get_count():
    """_safe_get_count raises on an empty history and returns a day's count."""
    with pytest.raises(ValueError):
        _safe_get_count(date.today(), CountHistory([]))
    # NOTE(review): result unused and not wrapped in pytest.raises, although
    # the queried day is absent from this one-entry history — looks like a
    # missing assertion (or a missing raises guard); confirm the intended
    # contract of _safe_get_count for days not in the history.
    _safe_get_count(date.today(), CountHistory([DayCount(date.today() + timedelta(1), 1)]))
    # Truthiness-only check; the stored count for today is 1.
    assert _safe_get_count(date.today(), CountHistory([DayCount(date.today(), 1)]))
_COUNT_HISTORY_CSV = '''2020/08/31,100
2020/09/01,200
2020/09/02,350
2020/09/03,250
2020/09/04,50
2020/09/05,120'''
def _get_small_count_history() -> CountHistory:
    """Parse the six-day CSV fixture into a CountHistory."""
    history = CountHistory.from_csv(_COUNT_HISTORY_CSV)
    return history
def _get_folder() -> str:
return '/'.join(__file__.split('/')[:-1])
def _get_rivoli_test_count_history() -> CountHistory:
    """Load the rivoli CSV fixture into a CountHistory.

    The original left the file object open (relying on garbage collection);
    the `with` block closes the handle deterministically.
    """
    csv_path = os.path.join(_get_folder(), 'test_data', 'rivoli_test_data.csv')
    with open(csv_path) as csv_file:
        return CountHistory.from_csv(csv_file.read())
def test_day_is_absolute_maximum():
    """Only the single highest-count day is the absolute maximum."""
    history = _get_small_count_history()
    assert _day_is_absolute_maximum(parse_ymd('2020/09/02'), history)
    for day in ('2020/09/03', '2020/09/04', '2020/09/05'):
        assert not _day_is_absolute_maximum(parse_ymd(day), history)

    history = _get_rivoli_test_count_history()
    for day in ('2020/09/03', '2020/09/04', '2020/09/05', '2020/12/05'):
        assert not _day_is_absolute_maximum(parse_ymd(day), history)
def test_day_is_year_maximum():
    """The year maximum is the highest-count day within its year."""
    assert _day_is_year_maximum(date.today(), CountHistory([DayCount(date.today(), 10)]))

    small = _get_small_count_history()
    assert _day_is_year_maximum(date(2020, 9, 2), small)
    for day in (
        date(2020, 8, 31),
        date(2020, 9, 1),
        date(2020, 9, 3),
        date(2020, 9, 4),
        date(2020, 9, 5),
    ):
        assert not _day_is_year_maximum(day, small)

    rivoli = _get_rivoli_test_count_history()
    for day in (date(2020, 9, 3), date(2020, 9, 4)):
        assert not _day_is_year_maximum(day, rivoli)
def test_day_is_monthly_record():
    """A monthly record is the highest-count day within its own month."""
    assert _day_is_monthly_record(date.today(), CountHistory([DayCount(date.today(), 10)]))

    small = _get_small_count_history()
    for day in (date(2020, 8, 31), date(2020, 9, 2)):
        assert _day_is_monthly_record(day, small)
    for day in (
        date(2020, 9, 1),
        date(2020, 9, 3),
        date(2020, 9, 4),
        date(2020, 9, 5),
    ):
        assert not _day_is_monthly_record(day, small)

    rivoli = _get_rivoli_test_count_history()
    for day in (date(2020, 9, 3), date(2020, 9, 4)):
        assert not _day_is_monthly_record(day, rivoli)
def test_day_rank():
    """Rank 0 is the best day overall; ranks follow descending counts."""
    today = date.today()
    assert _day_rank(today, CountHistory([DayCount(today, 10)])) == 0
    expected_ranks = [
        (date(2020, 8, 31), 4),
        (date(2020, 9, 1), 2),
        (date(2020, 9, 2), 0),
        (date(2020, 9, 3), 1),
        (date(2020, 9, 4), 5),
        (date(2020, 9, 5), 3),
    ]
    for day, rank in expected_ranks:
        assert _day_rank(day, _get_small_count_history()) == rank
    assert _day_rank(date(2020, 9, 3), _get_rivoli_test_count_history()) == 8
def test_optimistic_rank():
    """Equal values share the best (lowest) rank among themselves."""
    cases = [
        (1, [1, 1, 1], 0),
        (1, [1] * 10, 0),
        (1, [2] * 10 + [1], 10),
        (1, [2] * 10 + [1] * 30, 10),
        (2, [3, 1, 2], 1),
        (2, [3, 1, 2, 10, 34, 12], 4),
        (1, [3, 1, 2, 10, 34, 12], 5),
        (1, [3, 1, 2, 10, 34, 12, 1], 5),
    ]
    for value, pool, expected in cases:
        assert _optimistic_rank(value, pool) == expected
def test_day_is_last_day_of_month():
assert _day_is_last_day_of_month(date(2020, 1, 31))
assert _day_is_last_day_of_month(date(2020, 3, 31))
assert _day_is_last_day_of_month(date(1928, 12, 31))
assert _day_is_last_day_of_month(date(2032, 3, 31))
assert _day_is_last_day_of_month(date(2020, 2, 29))
assert _day_is_last_day_of_month(date(2020, 12, 31))
assert not _day_is_last_day_of_month(date(2020, 2, 28))
assert not _day_is_last_day_of_month(date(2020, 1, 1))
assert not _day_is_last_day_of_month(date(2020, 3, 1))
assert not _day_is_last_day_of_month(date(1928, 12, 1))
assert not _day_is_last_day_of_month(date(2032, 3, 4))
assert not _day_is_last_day_of_month(date(2020, 1, 1))
assert not _day_is_last_day_of_month(date(2020, 12, 1))
def test_day_is_first_day_of_year():
    """Only January 1st qualifies as the first day of a year."""
    for day in (date(2020, 1, 1), date(2020, 1, 1)):
        assert _day_is_first_day_of_year(day)
    non_first_days = (date(2020, 3, 1), date(1928, 12, 1),
                      date(2032, 3, 4), date(2020, 12, 1))
    for day in non_first_days:
        assert not _day_is_first_day_of_year(day)
def test_day_is_first_day_of_month():
    """The 1st of any month qualifies; any later day does not."""
    for day in (date(2020, 1, 1), date(2020, 3, 1), date(1928, 12, 1)):
        assert _day_is_first_day_of_month(day)
    for day in (date(2032, 3, 4), date(2020, 1, 18), date(2020, 12, 2)):
        assert not _day_is_first_day_of_month(day)
def test_extract_total_count():
    """An empty history totals 0; both fixtures have known grand totals."""
    empty_total = _extract_total_count(CountHistory([]))
    assert empty_total == 0
    small_total = _extract_total_count(_get_small_count_history())
    assert small_total == 1070
    rivoli_total = _extract_total_count(_get_rivoli_test_count_history())
    assert rivoli_total == 2841547
def test_get_month():
    """_get_month projects a date onto its (month, year) pair."""
    today = date.today()
    assert _get_month(today) == Month(today.month, today.year)
    for raw, expected in (('2020/08/09', Month(8, 2020)), ('2000/07/09', Month(7, 2000))):
        assert _get_month(parse_ymd(raw)) == expected
def test_group_by_month():
    """Daily counts are bucketed under their Month key."""
    assert _group_by_month(CountHistory([])) == {}
    by_month = _group_by_month(_get_small_count_history())
    assert by_month[Month(8, 2020)] == [100]
    assert by_month[Month(9, 2020)] == [200, 350, 250, 50, 120]
def test_group_by_year():
    """Daily counts are bucketed per calendar year."""
    assert _group_by_year(CountHistory([])) == {}
    expected = {2020: [100, 200, 350, 250, 50, 120]}
    assert _group_by_year(_get_small_count_history()) == expected
def test_cumulate():
    """_cumulate returns running prefix sums; empty in, empty out."""
    cases = [
        ([1] * 10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
        ([1] * 3, [1, 2, 3]),
        ([1, 2, 3], [1, 3, 6]),
        ([], []),
    ]
    for values, prefix_sums in cases:
        assert _cumulate(values) == prefix_sums
def test_month_to_cumulate_sums():
    """Each month maps to the running total of its own daily counts."""
    assert _month_to_cumulate_sums(CountHistory([])) == {}
    by_month = _month_to_cumulate_sums(_get_small_count_history())
    assert len(by_month) == 2
    assert by_month[Month(8, 2020)] == [100]
    assert by_month[Month(9, 2020)] == [200, 550, 800, 850, 970]
def test_get_month_range():
    """Month lengths, including leap (2020) vs non-leap (2021) February."""
    expected_lengths = [
        (Month(5, 1993), 31),
        (Month(3, 2020), 31),
        (Month(2, 2020), 29),
        (Month(2, 2021), 28),
        (Month(12, 2021), 31),
    ]
    for month, length in expected_lengths:
        assert _get_month_range(month) == length
def test_number_is_funny():
    """Round or repetitive totals are 'funny'; ordinary ones are not."""
    for total in (1000, 9999, 1000000, 9000, 777):
        assert _number_is_funny(total)
    for total in (31, 319, 1001, 10001, 999990):
        assert not _number_is_funny(total)
def test_compute_day_expression():
    """Relative wording for today/yesterday, absolute French date otherwise."""
    today = date.today()
    assert _compute_day_expression(today, today) == "Aujourd'hui"
    assert _compute_day_expression(today - timedelta(days=1), today) == 'Hier'
    assert _compute_day_expression(date(2020, 1, 1), today) == 'Le 01/01/2020'
    reference = date(2020, 1, 1)
    assert _compute_day_expression(reference, date(2020, 1, 1)) == "Aujourd'hui"
    assert _compute_day_expression(reference, date(2020, 1, 2)) == 'Hier'
    assert _compute_day_expression(reference, date(2020, 1, 3)) == 'Le 01/01/2020'
def test_compute_first_half_of_tweet():
    """The first half combines the day expression with the day's count."""
    count_history = _get_small_count_history()
    first_day = count_history.daily_counts[0].date
    assert first_day == date(2020, 8, 31)
    cases = [
        (first_day, date.today(), 'Le 31/08/2020, il y a eu 100 passages de cyclistes.'),
        (first_day, date(2020, 8, 31), "Aujourd'hui, il y a eu 100 passages de cyclistes."),
        (first_day, date(2020, 9, 1), 'Hier, il y a eu 100 passages de cyclistes.'),
        (count_history.daily_counts[-1].date, date(2020, 9, 6),
         'Hier, il y a eu 120 passages de cyclistes.'),
    ]
    for day, reference, expected in cases:
        assert _compute_first_half_of_tweet(day, count_history, reference) == expected
def test_round_to_twentieth():
    """Ratios are mapped up to the next multiple of five percent."""
    cases = [
        (0.23, 25),
        (0, 0),
        (0.0001, 5),
        (1, 100),
        (0.3249, 35),
        (0.23408, 25),
    ]
    for ratio, percent in cases:
        assert _round_to_twentieth(ratio) == percent
def test_month_to_french():
    """Months render as a capitalized French month name plus the year."""
    for month, label in ((Month(2, 2020), 'Février 2020'), (Month(5, 2010), 'Mai 2010')):
        assert _month_to_french(month) == label
def test_build_french_ordinal():
    """Zero-based rank n renders as '(n+1)ème ' except rank 0, which is empty."""
    expected_by_rank = [(1, '2ème '), (0, ''), (4, '5ème '), (5, '6ème '), (20, '21ème ')]
    for rank, text in expected_by_rank:
        assert _build_french_ordinal(rank) == text
def test_default_message():
    """Every event type renders its French announcement text."""
    cases = [
        (MonthRecordEvent(date.today()), 'Record du mois !'),
        (HistoricalRecordEvent(100), 'Record historique !'),
        (DayHistoricalRankEvent(0, 100), 'Meilleur jour historique.'),
        (DayHistoricalRankEvent(50, 100), 'Top 55%.'),
        (DayHistoricalRankEvent(99, 100), 'Top 100%.'),
        (DayHistoricalRankEvent(40, 1000), 'Top 5%.'),
        (DayHistoricalRankEvent(4, 100), '5ème meilleur jour historique.'),
        (DayHistoricalRankEvent(140, 159), 'Top 90%.'),
        (MonthSummaryEvent(Month(2, 2020), 14000, 4),
         "Février 2020 : 5ème meilleur mois de l'histoire avec 14 000 passages."),
        (MonthSummaryEvent(Month(2, 2020), 14000, 0),
         "Février 2020 : meilleur mois de l'histoire avec 14 000 passages."),
        (MonthSummaryEvent(Month(3, 2010), 15000, 0),
         "Mars 2010 : meilleur mois de l'histoire avec 15 000 passages."),
        (MonthSummaryEvent(Month(3, 2010), 15000, 10),
         "Mars 2010 : 11ème meilleur mois de l'histoire avec 15 000 passages."),
        (YearSummaryEvent(2020, 14000, 4),
         "2020 : 5ème meilleure année de l'histoire avec 14 000 passages."),
        (YearSummaryEvent(2020, 14000, 0),
         "2020 : meilleure année de l'histoire avec 14 000 passages."),
        (YearSummaryEvent(2010, 15000, 0),
         "2010 : meilleure année de l'histoire avec 15 000 passages."),
        (YearSummaryEvent(2010, 15000, 10),
         "2010 : 11ème meilleure année de l'histoire avec 15 000 passages."),
        (YearTotalEvent(15000, date.today(), 10),
         "15 000 passages depuis le début de l'année."),
        (YearTotalEvent(34003, date.today(), 10),
         "34 003 passages depuis le début de l'année."),
        (MonthTotalEvent(15000, date.today(), 10),
         '15 000 passages depuis le début du mois.'),
        (MonthTotalEvent(34003, date.today(), 10),
         '34 003 passages depuis le début du mois.'),
        (HistoricalTotalEvent(15000, 10),
         "15 000 passages depuis l'installation du compteur."),
        (HistoricalTotalEvent(34003, 10),
         "34 003 passages depuis l'installation du compteur."),
    ]
    for event, message in cases:
        assert event.default_message() == message
def test_capitalize_first_letter():
    """Only the first character is uppercased; the rest stays untouched."""
    cases = [
        ('4ème', '4ème'),
        ('meilleur', 'Meilleur'),
        ('Foo', 'Foo'),
        ('bar', 'Bar'),
        ('', ''),
    ]
    for raw, capitalized in cases:
        assert _capitalize_first_letter(raw) == capitalized
def test_crosses_power_of_ten():
    """True when the two totals straddle a power of ten."""
    for old_total, new_total in ((1, 10000), (10, 100), (3, 1000), (19, 1000)):
        assert _crosses_power_of_ten(old_total, new_total)
    for old_total, new_total in ((19, 90), (1932, 9009), (1932, 900)):
        assert not _crosses_power_of_ten(old_total, new_total)
def test_increments_first_digit():
    """True when the leading digit changes between the two totals."""
    for old_total, new_total in ((100000, 200000), (1234324, 3233235), (1, 2)):
        assert _increments_first_digit(old_total, new_total)
    for old_total, new_total in ((2, 2), (210, 244), (210, 299)):
        assert not _increments_first_digit(old_total, new_total)
def test_end_to_end():
test_counter = _get_rivoli_test_count_history()
day = date(2020, 1, 8)
expected_tweet = 'Hier, il y a eu 8 812 passages de cyclistes.\n7ème meilleur jour historique.\n#CompteurRivoli'
assert build_tweet(day, test_counter, day + timedelta(days=1), | |
import csv
import tkSimpleDialog
import webbrowser
from Tkinter import *
import ttk
import pandas as pd
import os
import collections
import subprocess
import tkFileDialog, tkMessageBox
import json
import requests
from camera_handler import API_Camera_Handler
from hp_data import exts
import data_files
"""
Contains classes for managing camera browser adding/editing
"""
class HP_Device_Form(Toplevel):
"""This class creates the window to add a new device. Must have browser login."""
def __init__(self, master, validIDs=None, pathvar=None, token=None, browser=None, gan=False):
Toplevel.__init__(self, master)
self.geometry("%dx%d%+d%+d" % (600, 600, 250, 125))
self.master = master
self.pathvar = pathvar # use this to set a tk variable to the path of the output txt file
self.validIDs = validIDs if validIDs is not None else []
self.set_list_options()
self.camera_added = False
self.is_gan = gan
self.renamed = {}
self.trello_token = StringVar()
self.trello_token.set(token) if token is not None else ''
self.browser_token = StringVar()
self.browser_token.set(browser) if browser is not None else ''
self.trello_key = data_files._TRELLO['app_key']
self.create_widgets()
def set_list_options(self):
"""
Sets combobox options for manufacturer, lens mounts, and device types
:return: None
"""
df = pd.read_csv(os.path.join(data_files._DB))
self.manufacturers = [str(x).strip() for x in df['Manufacturer'] if str(x).strip() != 'nan']
self.lens_mounts = [str(y).strip() for y in df['LensMount'] if str(y).strip() != 'nan']
self.device_types = [str(z).strip() for z in df['DeviceType'] if str(z).strip() != 'nan']
    def create_widgets(self):
        """
        Creates form widgets.
        Builds the scrollable form: a sample-file picker (non-GAN only), one
        SectionFrame per entry in self.headers, Trello/browser token entries,
        and Complete/Cancel buttons. In GAN mode several camera-specific
        questions are hidden or re-titled; otherwise the form stays disabled
        until exif data is loaded from a sample file.
        :return: None
        """
        self.f = VerticalScrolledFrame(self)
        self.f.pack(fill=BOTH, expand=TRUE)
        Label(self.f.interior, text='Add a new HP Device', font=('bold underline', 25)).pack()
        Label(self.f.interior, text='Once complete, the new camera will be added automatically, and a notification card will be posted to trello.', wraplength=400).pack()
        if not self.is_gan:
            # sample-file section: exif from this file pre-fills make/model/serial
            Label(self.f.interior, text='Sample File', font=('bold', 18)).pack()
            Label(self.f.interior, text='This is required. Select an image/video/audio file. Once metadata is loaded from it, you may continue to complete the form.'
                                        ' Some devices can have multiple make/model configurations for images vs. video, or for apps. In this instances, submit this '
                                        'form as normal, and then go to File->Update a Device on the main GUI.', wraplength=400).pack()
            self.imageButton = Button(self.f.interior, text='Select File', command=self.populate_from_image)
            self.imageButton.pack()
        # all questions defined here. end name with a * to make mandatory
        head = [('Media Type*', {'description': 'Select the type of media contained in the sample file (Image, Video, Audio)',
                                 'type': 'readonlylist',
                                 'values': ['image', 'video', 'audio']}),
                ('App', {'description': 'If the sample image was taken with a certain app, specify it here. Otherwise, leave blank.',
                         'type': 'text',
                         'values': None}),
                ('Exif Camera Make',{'description': 'Device make, pulled from device Exif.',
                                     'type': 'list',
                                     'values': self.manufacturers}),
                ('Exif Camera Model',{'description': 'Device model, pulled from device Exif.',
                                      'type': 'text',
                                      'values': None}),
                ('Device Serial Number', {'description': 'Device serial number, pulled from device Exif.',
                                          'type': 'text',
                                          'values': None}),
                ('Local ID*', {'description': 'This can be a one of a few forms. The most preferable is the cage number. If it is a personal device, you can use INITIALS-MODEL, such as'
                                              ' ES-iPhone4. Please check that the local ID is not already in use.',
                               'type': 'text',
                               'values': None}),
                ('Device Affiliation*', {'description': 'If it is a personal device, please define the affiliation as Other, and write in your organization and your initials, e.g. RIT-TK',
                                         'type': 'radiobutton',
                                         'values': ['RIT', 'PAR', 'Other (please specify):']}),
                ('HP Model*',{'description': 'Please write the make/model such as it would be easily identifiable, such as Samsung Galaxy S6.',
                              'type': 'text',
                              'values': None}),
                ('Edition',{'description': 'Specific edition of the device, if applicable and not already in the device\'s name.',
                            'type': 'text',
                            'values': None}),
                ('Device Type*',{'description': 'Select camera type. If none are applicable, select "other".',
                                 'type': 'readonlylist',
                                 'values':self.device_types}),
                ('Sensor Information',{'description': 'Sensor size/dimensions/other sensor info.',
                                       'type': 'text',
                                       'values': None}),
                ('Lens Mount*',{'description': 'Choose \"builtin\" if the device does not have interchangeable lenses.',
                                'type': 'list',
                                'values':self.lens_mounts}),
                ('Firmware/OS',{'description': 'Firmware/OS',
                                'type': 'text',
                                'values': None}),
                ('Firmware/OS Version',{'description': 'Firmware/OS Version',
                                        'type': 'text',
                                        'values': None}),
                ('General Description',{'description': 'Other specifications',
                                        'type': 'text',
                                        'values': None}),
                ]
        # OrderedDict keeps the on-screen order identical to the list above
        self.headers = collections.OrderedDict(head)
        self.questions = {}
        # one SectionFrame widget per question, keyed by its (original) header name
        for h in self.headers:
            d = SectionFrame(self.f.interior, title=h, descr=self.headers[h]['description'], type=self.headers[h]['type'], items=self.headers[h]['values'], bd=5)
            d.pack(pady=4)
            self.questions[h] = d
        Label(self.f.interior, text='Trello Login Token', font=(20)).pack()
        Label(self.f.interior, text='This is required to send a notification of the new device to Trello.').pack()
        trello_link = 'https://trello.com/1/authorize?key=' + self.trello_key + '&scope=read%2Cwrite&name=HP_GUI&expiration=never&response_type=token'
        trelloTokenButton = Button(self.f.interior, text='Get Trello Token', command=lambda: self.open_link(trello_link))
        trelloTokenButton.pack()
        tokenEntry = Entry(self.f.interior, textvar=self.trello_token)
        tokenEntry.pack()
        Label(self.f.interior, text='Browser Login Token*', font=(20)).pack()
        Label(self.f.interior, text='This allows for the creation of the new device.').pack()
        browserTokenButton = Button(self.f.interior, text='Get Browser Token', command=lambda: tkMessageBox.showinfo("Get Browser Token", "Refer to the HP Tool guide to retrieve your browser token."))
        browserTokenButton.pack()
        browserEntry = Entry(self.f.interior, textvar=self.browser_token)
        browserEntry.pack()
        buttonFrame = Frame(self)
        buttonFrame.pack()
        self.okbutton = Button(buttonFrame, text='Complete', command=self.export_results)
        self.okbutton.pack()
        self.cancelbutton = Button(buttonFrame, text='Cancel', command=self.destroy)
        self.cancelbutton.pack()
        if self.is_gan:
            # GAN mode: re-purpose the camera form -- hide hardware-only fields,
            # pin the device type, and retitle the make field as the GAN name
            self.questions['Exif Camera Make'].edit_items([])
            self.questions['Device Type*'].edit_items(['Computational'])
            self.questions['Device Type*'].set('Computational')
            self.add_required('Edition')
            self.questions['Sensor Information'].pack_forget()
            self.questions['Device Serial Number'].pack_forget()
            self.add_required('Exif Camera Model')
            self.questions['HP Model*'].pack_forget()
            self.questions["Lens Mount*"].pack_forget()
            self.questions['Lens Mount*'].set("NA")
            self.remove_required("Lens Mount*")
            self.add_required('Firmware/OS')
            self.add_required('Firmware/OS Version')
            self.rename("Exif Camera Make", "GAN Name*", "Name of the GAN used")
        else:
            # camera mode: everything stays disabled until a sample file is loaded
            self.okbutton.configure(state='disabled')
            for q, a in self.questions.iteritems():
                a.disable()
def remove_required(self, data):
if not data.endswith("*"):
return
try:
self.headers[data[:-1]] = self.headers.pop(data)
self.questions[data].remove_required()
self.renamed[data[:-1]] = data
except KeyError:
return
def add_required(self, data):
if data.endswith("*"):
return
try:
self.headers[data + "*"] = self.headers.pop(data)
self.questions[data].add_required()
self.renamed[data + "*"] = data
except KeyError:
return
    def rename(self, item, title, desc):
        """Retitle the form field *item* as *title* with description *desc*.
        Records the mapping in self.renamed so export_results can find the
        widget under its original key. No-op when *item* is unknown.
        """
        try:
            self.headers[title] = self.headers.pop(item)
            self.renamed[title] = item
            self.questions[item].rename(title, desc)
        except KeyError:
            return
def populate_from_image(self):
"""
Fill mandatory exif-fillable fields
:return: None
"""
self.imfile = tkFileDialog.askopenfilename(title='Select Image File', parent=self)
if not self.imfile:
return
self.imageButton.config(text=os.path.basename(self.imfile))
args = ['exiftool', '-f', '-j', '-Model', '-Make', '-SerialNumber', self.imfile]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
exifData = json.loads(p)[0]
except:
self.master.statusBox.println('An error ocurred attempting to pull exif data from image.')
return
for q, a in self.questions.iteritems():
a.enable()
if exifData['Make'] != '-':
self.questions['Exif Camera Make'].set(exifData['Make'])
self.questions['Exif Camera Make'].disable()
if exifData['Model'] != '-':
self.questions['Exif Camera Model'].set(exifData['Model'])
self.questions['Exif Camera Model'].disable()
if exifData['SerialNumber'] != '-':
self.questions['Device Serial Number'].set(exifData['SerialNumber'])
self.questions['Device Serial Number'].disable()
self.okbutton.config(state='normal')
    def export_results(self):
        """
        Triggers when ok/complete button is clicked. Validates and exports the new camera data.
        Validation order: required fields, browser token, then a live check
        that the Local ID is unused. On success, POSTs the camera to the
        browser and optionally posts a Trello notification card.
        :return: None
        """
        if self.is_gan:
            # GAN entries reuse the Exif model string as the HP model
            self.questions["HP Model*"].set(self.questions['Exif Camera Model'].get())
        msg = None
        for h in self.headers:
            # retitled/restarred fields are stored in self.questions under their original key
            if h in self.renamed.keys():
                contents = self.questions[self.renamed[h]].get()
            else:
                contents = self.questions[h].get()
            if h.endswith('*') and contents == '':
                msg = 'Field ' + h[:-1] + ' is a required field.'
                break
        if self.browser_token.get() == '':
            msg = 'Browser Token is a required field.'
        # NOTE(review): this live lookup runs (and its message wins) even when a
        # required-field error was already found above -- confirm that is intended
        check = self.local_id_used()
        msg = msg if check is None else check
        if msg:
            tkMessageBox.showerror(title='Error', message=msg, parent=self)
            return
        # post and check browser response
        browser_resp = self.post_to_browser()
        if browser_resp.status_code in (requests.codes.ok, requests.codes.created):
            cont = tkMessageBox.askyesno(title='Complete', message='Successfully posted new camera information! Post notification to Trello?', parent=self)
            self.camera_added = True
        else:
            tkMessageBox.showerror(title='Error', message='An error ocurred posting the new camera information to the MediBrowser. (' + str(browser_resp.status_code)+ ')', parent=self)
            return
        # 'cont' is always bound here: the failure branch above returns early
        if cont:
            code = self.post_to_trello()
            if code is not None:
                tkMessageBox.showerror('Trello Error', message='An error ocurred connecting to trello (' + str(
                    code) + ').\nIf you\'re not sure what is causing this error, email <EMAIL>.', parent=self)
        else:
            tkMessageBox.showinfo(title='Information', message='Complete!', parent=self)
        self.destroy()
    def post_to_browser(self):
        """
        Handles the browser interaction.
        Serializes the form contents (empty strings become JSON null via
        json_string) and POSTs them to the MediBrowser /cameras/ endpoint,
        authenticated with the token entered on the form.
        :return: requests.post() response
        """
        url = self.master.settings.get_key("apiurl") + '/cameras/'
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Token ' + self.browser_token.get(),
        }
        # 'exif' is a one-element list: a single make/model configuration;
        # further configurations are added later via File->Update a Device
        data = { 'hp_device_local_id': self.questions['Local ID*'].get(),
                 'affiliation': self.questions['Device Affiliation*'].get(),
                 'hp_camera_model': self.questions['HP Model*'].get(),
                 'exif':[{'exif_camera_make': self.questions['Exif Camera Make'].get(),
                          'exif_camera_model': self.questions['Exif Camera Model'].get(),
                          'exif_device_serial_number': self.questions['Device Serial Number'].get(),
                          'hp_app': self.questions['App'].get(),
                          'media_type': self.questions['Media Type*'].get()}],
                 'camera_edition': self.questions['Edition'].get(),
                 'camera_type': self.questions['Device Type*'].get(),
                 'camera_sensor': self.questions['Sensor Information'].get(),
                 'camera_description': self.questions['General Description'].get(),
                 'camera_lens_mount': self.questions['Lens Mount*'].get(),
                 'camera_firmware': self.questions['Firmware/OS'].get(),
                 'camera_version': self.questions['Firmware/OS Version'].get()
                 }
        data = self.json_string(data)
        return requests.post(url, headers=headers, data=data)
def json_string(self, data):
"""
Convert a dictionary of camera data to a string. Also changes empty strings in the dict to None
:param data: dictionary containing camera data
:return: string version of data
"""
for key, val in data.iteritems():
if val == '':
data[key] = None
for configuration in data['exif']:
for key, val in configuration.iteritems():
if val == '':
configuration[key] = None
return json.dumps(data)
def local_id_used(self):
"""
Check if a user-entered local ID is already in use
:return: (string) Error message (if error), otherwise None
"""
print 'Verifying local ID is not already in use...'
c = API_Camera_Handler(self, token=self.browser_token.get(), url=self.master.settings.get_key("apiurl"), given_id=self.questions["Local ID*"].get())
local_id_reference = c.get_local_ids()
if self.questions['Local ID*'].get().lower() in [i.lower() for i in local_id_reference]:
return 'Local ID ' + self.questions['Local ID*'].get() + ' already in | |
<filename>analyzer/darwin/lib/api/process.py
"""
Copyright (2014) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000, there is a non-exclusive license for use of
this work by or on behalf of the U.S. Government.
NOTICE:
For five (5) years from the United States Government is granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable worldwide license in this data to reproduce, prepare derivative works, and perform publicly and display publicly, by or on behalf of the Government. There is provision for the possible extension of the term of this license. Subsequent to that period or any extension granted, the United States Government is granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable worldwide license in this data to reproduce, prepare derivative works, distribute copies to the public, perform publicly and display publicly, and to permit others to do so. The specific term of the license can be identified by inquiry made to Sandia Corporation or DOE.
NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS.
Any licensee of this software has the obligation and responsibility to abide by the applicable export control laws, regulations, and general prohibitions relating to the export of technical data. Failure to obtain an export control license or other authority from the Government may result in criminal liability under U.S. laws.
Provides the dylib injection functionality and a wrapper for process execution.
The classes that extend this for different file types are found in darwin/modules/packages.
"""
import logging
import os
import random
import signal
import subprocess
import sys
import time
from shutil import copy

# third-party: Process creates and queries psutil.Process handles throughout
# (execute, is_alive, open, terminate, ...) but the import was missing,
# making every handle access raise NameError.
import psutil

from lib.common.rand import random_string
from lib.common.constants import PATHS, PIPE, SHUTDOWN_MUTEX
from lib.core.config import Config #parses the analysis.conf configuration file
# module-level logger, named after this module
log = logging.getLogger(__name__)
#This is the all-important list of APIs to trace
#There is an overhead for each one you add, so choose them carefully. Some examples are below.
# Entries are library names/globs handed to the dtrace-based tracer (see start_trace).
api_traces = ["libSystem*", #provides libc API as well as access to kernel methods and low-level system calls
              "CoreFoundation", #Provides primitives, data structures, etc.
              "Foundation", #data structure support
              "CoreServices", #access to things like Bonjour, Spotlight, AppleEvents, etc.
              "libgcc*", #gcc, obviously
              ]
def randomize_dylib(dylib_path):
    """Randomize dylib name, to prevent easy detection by malware.
    Copies the hooking library to ./dylib/<random>.dylib so its filename
    differs on every run.
    @param dylib_path: path of the original dylib to copy.
    @return: new dylib path, or the original path when the copy fails.
    """
    new_dylib_name = random_string(6) # generate a random name
    # make a path to the random name in the "dylib" dir under the current working directory
    new_dylib_path = os.path.join(os.getcwd(), "dylib", "{0}.dylib".format(new_dylib_name))
    try:
        # copy the dylib file to the new path in the current working directory
        copy(dylib_path, new_dylib_path)
        return new_dylib_path
    except EnvironmentError:
        # narrowed from a bare except: copy failures surface as IOError/OSError;
        # fall back to the original (more recognizable) path rather than crash
        return dylib_path
class Process:
    """ OS X process """
    # class-wide state shared across every Process instance
    first_process = True  # True until the first analyzed sample has been launched
    cuckoohooks = "cuckoohooks.dylib"  # filename of the API-hooking library to inject
    startup_time = 0  # faked machine-uptime offset written into each config file
    def __init__(self, pid=0, h_process=None, thread_id=0, h_thread=0):
        """@param pid: PID.
        @param h_process: process handle.
        @param thread_id: thread id.
        @param h_thread: thread handle.
        """
        self.pid = pid
        self.h_process = h_process  # becomes a psutil.Process handle once open()/execute() succeeds
        self.thread_id = thread_id
        self.h_thread = h_thread
        self.suspended = True #assume it is suspended to start with
        self.event_handle = None
        self.is_tracing = False  # set once dtruss/apitrace monitors are attached
    def execute(self, path, args=None, suspended=False):
        """Execute sample process.
        Forks: the child injects the hooking dylib via DYLD_INSERT_LIBRARIES
        and execve()s the sample; the parent records the child pid and starts
        the dtrace-based monitors.
        @param path: sample path.
        @param args: process args.
        @param suspended: is suspended. (NOTE(review): not referenced in this
            body -- confirm whether suspended launches were ever implemented.)
        @return: operation status.
        """
        #check to make sure the file is accessible
        if os.access(path, os.F_OK):
            log.info("File exists at path \"%s\"", path)
        #by default you can't execute in the /tmp directory, so have to change permissions
        i = 0
        while not os.access(path, os.X_OK) and i < 2:
            os.chmod(path, 0755)
            i += 1
        if not os.access(path, os.X_OK):
            log.error("No permissions to execute file at path \"%s\", "
                      "execution aborted", path)
            return False
        # fork a child process
        # Note: this could also be done with the subprocess or multiprocessing modules
        # but neither of them gave the independence I was looking for.
        try:
            newpid = os.fork()
        except OSError, e:
            log.error("Failed to execute process from path \"%s\" with "
                      "arguments \"%s\" (Error: %s)", path, args, e)
            return False
        # randomize the hooking library name
        dylib = randomize_dylib(os.path.join("dylib", self.cuckoohooks))
        if newpid == 0: #if this is the child process
            #set the environment variables for the syscall hook injection
            new_environ = os.environ
            new_environ['DYLD_FORCE_FLAT_NAMESPACE'] = '1'
            new_environ['DYLD_INSERT_LIBRARIES'] = dylib
            log.info("Child process with pid %d", os.getpid())
            self.pid = os.getpid()
            Process.first_process = False
            # set the sid to make this child process independent of the parent
            os.setsid()
            # wait for traces to be initialized: the parent's start_trace()
            # creates this log file once apitrace is attached
            app_log = os.path.join(PATHS["logs"], "api_calls_"+str(self.pid)+".log")
            while not os.path.exists(app_log):
                time.sleep(0.3)
            # execute the given executable (replaces this child's image)
            if args is None:
                os.execve(path, (path,), new_environ)
            else:
                os.execve(path, args, new_environ)
            #exit when finished (only reached if execve failed)
            os._exit(0)
        else: #this is in the parent process
            log.info("Parent process with pid %d", os.getpid())
            #store the child process info; requires psutil for the handle
            self.pid = newpid
            self.h_process = psutil.Process(self.pid)
            self.start_trace()
            log.info("Successfully executed process from path \"%s\" with "
                     "arguments \"%s\" with pid %d", path, args, self.pid)
            return True
    def start_trace(self):
        """
        Once a process has been started, write the library config file
        and start the system call tracing.
        Spawns dtruss (system calls) and apitrace (API calls) against
        self.pid, blocks until both log files have content, then resumes
        the child.
        @return: None
        """
        # write configuration file for injected library
        config_path = os.path.join(os.getenv("TMPDIR"), "%s.conf" % self.pid)
        log.info("Writing configuration file at %s.", config_path)
        with open(config_path, "w") as config:
            cfg = Config("analysis.conf")
            # The first time we come up with a random startup-time.
            if Process.first_process:
                # This adds 1 up to 30 times of 20 minutes to the startup
                # time of the process, therefore bypassing anti-vm checks
                # which check whether the VM has only been up for <10 minutes.
                Process.startup_time = random.randint(1, 30) * 20 * 60 * 1000
            config.write("host-ip={0}\n".format(cfg.ip))
            config.write("host-port={0}\n".format(cfg.port))
            config.write("pipe={0}\n".format(PIPE))
            config.write("results={0}\n".format(PATHS["drop"]+"/"))
            config.write("analyzer={0}\n".format(os.getcwd()))
            config.write("first-process={0}\n".format(Process.first_process))
            config.write("startup-time={0}\n".format(Process.startup_time))
            config.write("shutdown-mutex={0}\n".format(SHUTDOWN_MUTEX))
            Process.first_process = False
        # Start system call tracing
        # Dtruss traces system calls using Dtrace
        pargs = ["dtruss", "-l", "-p", str(self.pid)]
        truss_log = os.path.join(PATHS["logs"], "system_calls_"+str(self.pid)+".log")
        results = open(truss_log, "a+")
        try:
            proc2 = subprocess.Popen(pargs, stdout=results, stderr=results)
            log.info("Starting Dtruss on pid %d", self.pid)
        except (OSError, ValueError):
            log.exception("Failed to start system call monitor.")
            # NOTE(review): on success the handle is left open on purpose --
            # the spawned tracer keeps writing to it; confirm intended
            results.close()
        # Wait for initialization lines to appear in log files
        while os.path.getsize(truss_log) == 0:
            time.sleep(0.5)
        # Dapptrace traces API calls using Dtrace. I used my own version modified for performance
        # NOTE: This slows down the program A LOT if you use the -U option (tracks all libraries) instead of -u
        os.chmod("lib/api/apitrace", 0755)
        pargs = ["lib/api/apitrace", "-u", ",".join(api_traces), "-p", str(self.pid)]
        app_log = os.path.join(PATHS["logs"], "api_calls_"+str(self.pid)+".log")
        results2 = open(app_log, "a+")
        try:
            proc1 = subprocess.Popen(pargs, stdout=results2, stderr=results2)
            log.info("Starting apitrace on pid %d", self.pid)
        except (OSError, ValueError):
            log.exception("Failed to start api call monitor.")
            results2.close()
        # wait for initialization lines to appear in log files
        # (the child's execute() loop also unblocks once app_log exists)
        while os.path.getsize(app_log) == 0:
            time.sleep(0.5)
        self.is_tracing = True
        self.resume()
def is_alive(self):
"""Process is alive?
@return: process status.
"""
exists = True
if not self.h_process:
exists = self.open()
if not exists: #program has already exited
return False
#make sure the process is both in the table and not a zombie (ie, terminated)
return self.h_process.is_running() and not (self.h_process.status() == psutil.STATUS_ZOMBIE)
    def get_filepath(self):
        """Get process image file path.
        @return: decoded file path.
        """
        if not self.h_process:
            self.open()  # lazily attach the psutil handle
        return self.h_process.name()
def exit_code(self):
"""Get process exit code.
@return: exit code value.
"""
if not self.h_process:
self.open()
return os.waitpid(self.pid)
def open(self):
"""Open a process and/or thread.
@return: operation status.
"""
ret = bool(self.pid or self.thread_id)
if self.pid and not self.h_process:
try:
self.h_process = psutil.Process(self.pid)
ret = True
except: #unable to get process
ret = False
return ret
def get_parent_pid(self):
"""Get the Parent Process ID."""
if not self.h_process:
self.open()
return self.h_process.ppid()
def terminate(self):
"""Terminate process.
@return: operation status.
"""
if self.h_process == 0:
self.open()
pargs = ["kill", str(self.pid)]
count = 0 #sometimes this requires multiple tries
while self.h_process.status() == psutil.STATUS_RUNNING:
# Note: both self.h_process.terminate() and os.kill were unreliable for termination
log.info("Attempting to kill process " + str(self.pid) + ", attempt " + str(count))
proc = subprocess.Popen(pargs)
count +=1
if count > 5:
break
if self.h_process.status() == psutil.STATUS_ZOMBIE or self.h_process.status() == psutil.STATUS_DEAD:
log.info("Successfully terminated | |
recursive functions that will use add_person to make
the tree of people (Descendants) to be included within the report.
"""
    def __init__(self, dbase, canvas):
        """Collect the report options off the GUI and set up recursion state."""
        self.database = dbase
        self.canvas = canvas
        # Families already placed -- guards against drawing a family twice
        # (and against infinite loops on pedigree cycles).
        self.families_seen = set()
        # One entry per generation: the last box placed in that column.
        self.cols = []
        # One entry per generation: the last *direct descendant* box placed.
        self.__last_direct = []
        gui = GuiConnect()
        self.do_parents = gui.get_val('show_parents')
        self.max_generations = gui.get_val('maxgen')
        self.max_spouses = gui.get_val('maxspouse')
        self.inlc_marr = gui.get_val("inc_marr")
        if not self.max_spouses:
            # No spouses shown -> marriage boxes would be meaningless.
            self.inlc_marr = False
        self.spouse_indent = gui.get_val('ind_spouse')
        #is the option even available?
        self.bold_direct = gui.get_val('bolddirect')
        #can we bold direct descendants?
        #bold_now will have only three values
        #0 - no bolding
        #1 - Only bold the first person
        #2 - Bold all direct descendants
        self.bold_now = 0
        gui = None
    def add_to_col(self, box):
        """
        Add the box to a column on the canvas. we will do these things:
        set the .linked_box attrib for the boxs in this col
        get the height and width of this box and set it no the column
        also we set the .x_cm to any s_level (indentation) here
        we will calculate the real .x_cm later (with indentation)
        """
        level = box.level[LVL_GEN]
        # Grow the per-generation bookkeeping lists until this generation
        # has a slot.
        #make the column list of people
        while len(self.cols) <= level:
            self.cols.append(None)
            self.__last_direct.append(None)
        if self.cols[level]:  #if (not the first box in this column)
            last_box = self.cols[level]
            last_box.linked_box = box
            #calculate the .y_cm for this box: stack it directly below the
            #previous box in the same generation column.
            box.y_cm = last_box.y_cm
            box.y_cm += last_box.height
            if last_box.boxstr in ["CG2-box", "CG2b-box"]:
                box.y_cm += self.canvas.report_opts.box_shadow
            if box.boxstr in ["CG2-box", "CG2b-box"]:
                # Person boxes get the larger person-gap; others the
                # smaller marriage-gap.
                box.y_cm += self.canvas.report_opts.box_pgap
            else:
                box.y_cm += self.canvas.report_opts.box_mgap
            if box.level[LVL_ISDESC] == 0 and self.__last_direct[level]:
                #ok, a new direct descendant.
                if box.father != self.__last_direct[level].father and \
                   box.father != self.__last_direct[level]:
                    # Unrelated to the previous direct descendant in this
                    # column: insert an extra gap to separate family groups.
                    box.y_cm += self.canvas.report_opts.box_pgap
        self.cols[level] = box
        if box.level[LVL_ISDESC] == 0:
            self.__last_direct[level] = box
        if self.spouse_indent:
            # Indent spouses proportionally to their spouse level.
            box.x_cm = self.canvas.report_opts.spouse_offset * box.level[LVL_ISDESC]
        else:
            box.x_cm = 0.0
        self.canvas.set_box_height_width(box)
def add_person_box(self, level, indi_handle, fams_handle, father):
""" Makes a person box and add that person into the Canvas. """
myself = PersonBox(level, True)
myself.father = father
if myself.level[LVL_ISDESC] == 0 and self.bold_direct and self.bold_now:
if self.bold_now == 1:
self.bold_now = 0
myself.set_bold()
if level[LVL_ISDESC] == 0 and father and myself.level[LVL_GEN] != father.level[LVL_GEN]:
#I am a child
if father.line_to:
line = father.line_to
else:
line = LineBase(father)
father.line_to = line
#self.canvas.add_line(line)
line.add_to(myself)
#calculate the text.
myself.calc_text(self.database, indi_handle, fams_handle)
if indi_handle:
myself.add_mark(self.database,
self.database.get_person_from_handle(indi_handle))
self.add_to_col(myself)
self.canvas.add_box(myself)
return myself
def add_marriage_box(self, level, indi_handle, fams_handle, father):
""" Makes a marriage box and add that person into the Canvas. """
myself = FamilyBox(level, True)
#if father is not None:
# myself.father = father
#calculate the text.
myself.calc_text(self.database, indi_handle, fams_handle)
self.add_to_col(myself)
self.canvas.add_box(myself)
return myself
    def recurse(self, person_handle, x_level, s_level, father):
        """traverse the ancestors recursively until
        either the end of a line is found,
        or until we reach the maximum number of generations
        or we reach the max number of spouses
        that we want to deal with"""
        # Guard clauses: no handle, generation limit hit, spouse limit hit,
        # or this person was already drawn as part of a family.
        if not person_handle:
            return
        if x_level > self.max_generations:
            return
        if s_level > 0 and s_level == self.max_spouses:
            return
        if person_handle in self.families_seen:
            return
        myself = None
        person = self.database.get_person_from_handle(person_handle)
        family_handles = person.get_family_handle_list()
        if s_level == 0:
            # Direct descendant: draw his/her own box, attached to the
            # first family (if any).
            val = family_handles[0] if family_handles else None
            myself = self.add_person_box((x_level, s_level),
                                         person_handle, val, father)
        marr = None
        spouse = None
        if s_level == 1:
            # Spouses-of-spouses are never bolded: save the flag and
            # restore it at the end of this call.
            tmp_bold = self.bold_now
            self.bold_now = 0
        for family_handle in family_handles:
            if family_handle not in self.families_seen:
                self.families_seen.add(family_handle)
                family = self.database.get_family_from_handle(family_handle)
                #Marriage box if the option is there.
                if self.inlc_marr and self.max_spouses > 0:
                    marr = self.add_marriage_box((x_level, s_level+1),
                                                 person_handle, family_handle,
                                                 father if s_level else myself)
                spouse_handle = utils.find_spouse(person, family)
                if (self.max_spouses > s_level and
                        spouse_handle not in self.families_seen):
                    def _spouse_box(who):
                        # `who` is the box the spouse hangs below.
                        return self.add_person_box((x_level, s_level+1),
                                                   spouse_handle,
                                                   family_handle, who)
                    if s_level > 0:
                        spouse = _spouse_box(father)
                    elif self.inlc_marr:
                        spouse = _spouse_box(marr)
                    else:
                        spouse = _spouse_box(myself)
                mykids = [kid.ref for kid in family.get_child_ref_list()]
                def _child_recurse(who):
                    # One generation down, restarting at spouse level 0.
                    self.recurse(child_ref, x_level+1, 0, who)
                for child_ref in mykids:
                    # Children attach to the marriage box when shown,
                    # else to the spouse box, else to this person.
                    if self.inlc_marr and self.max_spouses > 0:
                        _child_recurse(marr)
                    elif spouse:
                        _child_recurse(spouse)
                    else:
                        _child_recurse(myself)
                if self.max_spouses > s_level and \
                        spouse_handle not in self.families_seen:
                    # Recurse sideways into the spouse's other families.
                    self.recurse(spouse_handle, x_level, s_level+1, spouse)
        if s_level == 1:
            self.bold_now = tmp_bold
    def add_family(self, level, family, father2):
        """
        Adds a family into the canvas.
        only will be used for my direct grandparents, and my parents only.

        Returns the list of boxes created for the family:
        [father box, (marriage box if shown), mother box].
        """
        family_h = family.get_handle()
        father_h = family.get_father_handle()
        mother_h = family.get_mother_handle()
        # The center person's parents are always bolded.
        self.bold_now = 2
        if father_h:
            father_b = self.add_person_box(
                (level, 0), father_h, family_h, father2)
        else:
            # Placeholder box when the father is unknown.
            father_b = self.add_person_box(
                (level, 0), None, None, father2)
        retrn = [father_b]
        if self.inlc_marr:
            family_b = self.add_marriage_box(
                (level, 1), father_h, family_h, father_b)
            retrn.append(family_b)
        self.families_seen.add(family_h)
        if mother_h:
            mother_b = self.add_person_box(
                (level, 0), mother_h, family_h, father_b)
        else:
            # Placeholder box when the mother is unknown.
            mother_b = self.add_person_box(
                (level, 0), None, None, father_b)
        retrn.append(mother_b)
        # Children hang off the marriage box when shown, else the father box.
        family_line = family_b if self.inlc_marr else father_b
        for child_ref in family.get_child_ref_list():
            self.recurse(child_ref.ref, level+1, 0, family_line)
        self.bold_now = 0
        #Set up the lines for the family
        line = family_line.line_to
        if not line:
            #no children.
            line = LineBase(family_line)
            family_line.line_to = line
        if self.inlc_marr:
            # With a marriage box the father feeds into it too; the mother
            # always feeds into the family line.
            line.add_from(father_b)
        line.add_from(mother_b)
        return retrn
def has_children(self, person_handle):
"""
Quickly check to see if this person has children
still we want to respect the FamiliesSeen list
"""
if not person_handle or person_handle in self.families_seen:
return False
person = self.database.get_person_from_handle(person_handle)
for family_handle in person.get_family_handle_list():
if family_handle not in self.families_seen:
family = self.database.get_family_from_handle(family_handle)
if family.get_child_ref_list():
return True
return False
def recurse_if(self, person_handle, level):
"""
Quickly check to see if we want to continue recursion
still we want to respect the FamiliesSeen list
"""
person = self.database.get_person_from_handle(person_handle)
show = False
myfams = person.get_family_handle_list()
if len(myfams) > 1: #and self.max_spouses > 0
show = True
if not self.inlc_marr:
#if the condition is true, we only want to show
#this parent again IF s/he has other children
show = self.has_children(person_handle)
#if self.max_spouses == 0 and not self.has_children(person_handle):
# self.families_seen.add(person_handle)
# show = False
if show:
self.bold_now = 1
self.recurse(person_handle, level, 0, None)
#------------------------------------------------------------------------
#
# Class MakePersonTree (Personal Descendant Tree option)
#
#------------------------------------------------------------------------
class MakePersonTree(RecurseDown):
"""
The main procedure to use recursion to make the tree based off of a person.
order of people inserted into Persons is important.
makes sure that order is done correctly.
"""
    def __init__(self, dbase, canvas):
        """Set up the tree maker.

        The center person occupies a generation of their own, so one level
        is taken off the configured maximum.
        """
        RecurseDown.__init__(self, dbase, canvas)
        self.max_generations -= 1
    def start(self, person_id, center_boxes):
        """follow the steps to make a tree off of a person"""
        persons = []
        center_father = None
        center_mother = None
        center1 = self.database.get_person_from_gramps_id(person_id)
        if center1 is None:
            raise ReportError(_("Person %s is not in the Database") % person_id)
        center1_h = center1.get_handle()  #could be mom too.
        # Look up the family of the center person's own parents (only when
        # the 'show_parents' option is on).
        family2 = family2_h = None
        if self.do_parents:
            family2_h = center1.get_main_parents_family_handle()
            if family2_h:
                family2 = self.database.get_family_from_handle(family2_h)
        mother2_h = father2_h = None
        if family2:
            father2_h = family2.get_father_handle()
            mother2_h = family2.get_mother_handle()
        #######################
        #don't do center person's parents family.
        if family2_h:
            self.families_seen.add(family2_h)
        #######################
        #Center person's Fathers OTHER wives
        #######################
        #update to only run if he HAD other wives!
        if father2_h:
            self.recurse_if(father2_h, 0)
        #######################
        #Center persons parents only!
        #######################
        #now it will ONLY be my fathers parents
        if family2:
            family = self.add_family(0, family2, None)
            # Save these so we can link up to them
            center_father = family[0] if len(family) > 0 and isinstance(family[0], PersonBox) else None
            center_mother = family[-1] if len(family) > 1 and isinstance(family[-1], PersonBox) else None
        else:
            # No parents to show: draw the center person directly (bolded).
            self.bold_now = 2
            self.recurse(center1_h, 0, 0, None)
            self.bold_now = 0
        #######################
        #Center person's mothers OTHER husbands
        #######################
        #update to only run if she HAD other husbands!
        if mother2_h:
            self.recurse_if(mother2_h, 0)
        if center_boxes is not None:
            self.link_ancestors_to_center(center_boxes, center_father, center_mother)
        # NOTE(review): 'persons' is never populated before being returned;
        # callers appear to ignore the return value -- confirm before
        # relying on it.
        return persons
def link_ancestors_to_center(self, center_boxes, center_father, center_mother):
print('\ncenter_boxes:')
print('father_b ' + debug_box(center_boxes[0]))
print(' marr_b ' + debug_box(center_boxes[1]))
print('mother_b ' + debug_box(center_boxes[2]))
print(' child_b ' + debug_box(center_boxes[3]))
ancestor_father = center_boxes[0]
ancestor_mother = center_boxes[2]
lines_to_remove = []
if ancestor_father is not None:
for line in self.canvas.lines:
try:
line.start.index(ancestor_father)
lines_to_remove.append(line)
continue
except ValueError:
pass
try:
line.end.remove(ancestor_father)
line.add_to(center_father)
except ValueError:
pass
if ancestor_mother is not None:
for line in self.canvas.lines:
try:
line.start.index(ancestor_mother)
lines_to_remove.append(line)
continue
except ValueError:
pass
try:
line.end.remove(ancestor_mother)
line.add_to(center_mother)
except ValueError:
pass
for line in lines_to_remove:
try:
_ = self.canvas.lines.remove(line)
except ValueError:
pass
for i, box in enumerate(center_boxes):
if box is not None:
try:
| |
number <= len(smallElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_yager, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_yager, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_yager, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_yager, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
# Yager combination (safe variant) benchmark for frames of size 10 and
# 10000: for every requested number of focal elements, time combining
# 2, 3, 4 and 5 randomly generated mass functions.
for frame_size, element_pool in (("10", mediumElements), ("10000", bigElements)):
    for number in numberOfElements:
        if number <= len(element_pool):
            # Five i.i.d. mass functions with `number` equally weighted focals.
            masses = [MassFunction(*[(x, 1.0/number)
                                     for x in random.sample(element_pool, number)])
                      for _ in range(5)]
            for nb_bbas in range(2, 6):
                s = format_time(
                    "size " + frame_size + ", focals " + str(number) + ", " + str(nb_bbas) + " bbas",
                    time_function(nb_iterations, masses[0].combination_yager,
                                  *masses[1:nb_bbas], timeout=timeout, verbose=False),
                    nb_iterations, timeout)
                print(s)
                f.write(s + "\n")
########################################################################################################################################################################################################
# Yager combination WITHOUT the safety checks: same benchmark grid
# (frame sizes 3, 10 and 10000; 2..5 mass functions) as the safe variant.
########################################################################################################################################################################################################
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Yager (unsafe):"
print(s)
f.write(s + "\n")
for frame_size, element_pool in (("3", smallElements),
                                 ("10", mediumElements),
                                 ("10000", bigElements)):
    for number in numberOfElements:
        if number <= len(element_pool):
            # Five i.i.d. mass functions with `number` equally weighted focals.
            masses = [MassFunction(*[(x, 1.0/number)
                                     for x in random.sample(element_pool, number)])
                      for _ in range(5)]
            for nb_bbas in range(2, 6):
                s = format_time(
                    "size " + frame_size + ", focals " + str(number) + ", " + str(nb_bbas) + " bbas",
                    time_function(nb_iterations, masses[0].combination_yager_unsafe,
                                  *masses[1:nb_bbas], timeout=timeout, verbose=False),
                    nb_iterations, timeout)
                print(s)
                f.write(s + "\n")
########################################################################################################################################################################################################
# Average combination rule benchmark: frame sizes 3 and 10,
# 2..5 mass functions per timing run.
########################################################################################################################################################################################################
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Average:"
print(s)
f.write(s + "\n")
for frame_size, element_pool in (("3", smallElements), ("10", mediumElements)):
    for number in numberOfElements:
        if number <= len(element_pool):
            # Five i.i.d. mass functions with `number` equally weighted focals.
            masses = [MassFunction(*[(x, 1.0/number)
                                     for x in random.sample(element_pool, number)])
                      for _ in range(5)]
            for nb_bbas in range(2, 6):
                s = format_time(
                    "size " + frame_size + ", focals " + str(number) + ", " + str(nb_bbas) + " bbas",
                    time_function(nb_iterations, masses[0].combination_average,
                                  *masses[1:nb_bbas], timeout=timeout, verbose=False),
                    nb_iterations, timeout)
                print(s)
                f.write(s + "\n")
for number in numberOfElements:
if number <= len(bigElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x | |
request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_test_gateway_by_id_with_http_info(test_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str test_gateway_id: ID of testGateway to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['test_gateway_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'test_gateway_id' is set
if ('test_gateway_id' not in params or
params['test_gateway_id'] is None):
raise ValueError("Missing the required parameter `test_gateway_id` when calling `delete_test_gateway_by_id`")
collection_formats = {}
path_params = {}
if 'test_gateway_id' in params:
path_params['testGatewayId'] = params['test_gateway_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/testGateways/{testGatewayId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def get_test_gateway_by_id(cls, test_gateway_id, **kwargs):
"""Find TestGateway
Return single instance of TestGateway by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_test_gateway_by_id(test_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str test_gateway_id: ID of testGateway to return (required)
:return: TestGateway
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_test_gateway_by_id_with_http_info(test_gateway_id, **kwargs)
else:
(data) = cls._get_test_gateway_by_id_with_http_info(test_gateway_id, **kwargs)
return data
    @classmethod
    def _get_test_gateway_by_id_with_http_info(cls, test_gateway_id, **kwargs):
        """Find TestGateway

        Return single instance of TestGateway by its ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_test_gateway_by_id_with_http_info(test_gateway_id, async=True)
        >>> result = thread.get()
        :param async bool
        :param str test_gateway_id: ID of testGateway to return (required)
        :return: TestGateway
        If the method is called asynchronously,
        returns the request thread.
        """
        # Recognised keyword arguments; anything else is forwarded as an
        # extra query parameter.
        all_params = ['test_gateway_id']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'test_gateway_id' is set
        if ('test_gateway_id' not in params or
                params['test_gateway_id'] is None):
            raise ValueError("Missing the required parameter `test_gateway_id` when calling `get_test_gateway_by_id`")
        collection_formats = {}
        # The gateway id is substituted into the URL path template below.
        path_params = {}
        if 'test_gateway_id' in params:
            path_params['testGatewayId'] = params['test_gateway_id']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        # NOTE(review): `async=` as a keyword argument is a SyntaxError on
        # Python >= 3.7, where `async` became a reserved word; newer
        # swagger-codegen renamed it to `async_req`. Confirm the supported
        # Python versions before upgrading the interpreter.
        return cls.get_api_client().call_api(
            '/testGateways/{testGatewayId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TestGateway',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
@classmethod
def list_all_test_gateways(cls, **kwargs):
"""List TestGateways
Return a list of TestGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_test_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[TestGateway]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_test_gateways_with_http_info(**kwargs)
else:
(data) = cls._list_all_test_gateways_with_http_info(**kwargs)
return data
    @classmethod
    def _list_all_test_gateways_with_http_info(cls, **kwargs):
        """List TestGateways

        Return a list of TestGateways
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.list_all_test_gateways_with_http_info(async=True)
        >>> result = thread.get()
        :param async bool
        :param int page: page number
        :param int size: page size
        :param str sort: page order
        :return: page[TestGateway]
        If the method is called asynchronously,
        returns the request thread.
        """
        # Recognised keyword arguments; anything else is forwarded as an
        # extra query parameter.
        all_params = ['page', 'size', 'sort']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        query_params = []
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        # Paging/sorting options travel as query parameters.
        if 'page' in params:
            query_params.append(('page', params['page']))
        if 'size' in params:
            query_params.append(('size', params['size']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        # NOTE(review): `async=` as a keyword argument is a SyntaxError on
        # Python >= 3.7, where `async` became a reserved word; newer
        # swagger-codegen renamed it to `async_req`. Confirm the supported
        # Python versions before upgrading the interpreter.
        return cls.get_api_client().call_api(
            '/testGateways', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='page[TestGateway]',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
@classmethod
def replace_test_gateway_by_id(cls, test_gateway_id, test_gateway, **kwargs):
    """Replace TestGateway

    Replace all attributes of TestGateway.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_test_gateway_by_id(test_gateway_id, test_gateway, async=True)
    >>> result = thread.get()

    :param async bool
    :param str test_gateway_id: ID of testGateway to replace (required)
    :param TestGateway test_gateway: Attributes of testGateway to replace (required)
    :return: TestGateway
             If the method is called asynchronously,
             returns the request thread.
    """
    # Synchronous and asynchronous calls alike are delegated to the
    # *_with_http_info helper, which honours the 'async' kwarg itself:
    # it returns the response data directly, or the request thread.
    kwargs['_return_http_data_only'] = True
    return cls._replace_test_gateway_by_id_with_http_info(test_gateway_id, test_gateway, **kwargs)
@classmethod
def _replace_test_gateway_by_id_with_http_info(cls, test_gateway_id, test_gateway, **kwargs):
    """Replace TestGateway

    Replace all attributes of TestGateway.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_test_gateway_by_id_with_http_info(test_gateway_id, test_gateway, async=True)
    >>> result = thread.get()

    :param async bool
    :param str test_gateway_id: ID of testGateway to replace (required)
    :param TestGateway test_gateway: Attributes of testGateway to replace (required)
    :return: TestGateway
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognized keyword arguments; anything else is forwarded verbatim
    # as an extra query parameter.
    all_params = ['test_gateway_id', 'test_gateway',
                  'async', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    query_params = []
    params = locals()
    # six.iteritems() removed: plain dict.items() is the Python 3 idiom
    for key, val in params['kwargs'].items():
        if key not in all_params:
            query_params.append((key, val))
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'test_gateway_id' is set
    # (params.get() covers both "missing" and "explicitly None")
    if params.get('test_gateway_id') is None:
        raise ValueError("Missing the required parameter `test_gateway_id` when calling `replace_test_gateway_by_id`")
    # verify the required parameter 'test_gateway' is set
    if params.get('test_gateway') is None:
        raise ValueError("Missing the required parameter `test_gateway` when calling `replace_test_gateway_by_id`")
    collection_formats = {}
    path_params = {}
    if 'test_gateway_id' in params:
        path_params['testGatewayId'] = params['test_gateway_id']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'test_gateway' in params:
        body_params = params['test_gateway']
    # HTTP header `Accept`
    header_params['Accept'] = cls.get_api_client().select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
        ['application/json'])
    # Authentication setting
    auth_settings = []
    return cls.get_api_client().call_api(
        '/testGateways/{testGatewayId}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TestGateway',
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUGFIX: `async` is a reserved word since Python 3.7, so it cannot
        # appear as a literal keyword argument; unpack it from a dict instead.
        **{'async': params.get('async')})
@classmethod
def update_test_gateway_by_id(cls, test_gateway_id, test_gateway, **kwargs):
    """Update TestGateway

    Update attributes of TestGateway.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_test_gateway_by_id(test_gateway_id, test_gateway, async=True)
    >>> result = thread.get()

    :param async bool
    :param str test_gateway_id: ID of testGateway to update. (required)
    :param TestGateway test_gateway: Attributes of testGateway to update. (required)
    :return: TestGateway
             If the method is called asynchronously,
             returns the request thread.
    """
    # Synchronous and asynchronous calls alike are delegated to the
    # *_with_http_info helper, which honours the 'async' kwarg itself:
    # it returns the response data directly, or the request thread.
    kwargs['_return_http_data_only'] = True
    return cls._update_test_gateway_by_id_with_http_info(test_gateway_id, test_gateway, **kwargs)
@classmethod
def _update_test_gateway_by_id_with_http_info(cls, test_gateway_id, test_gateway, **kwargs):
"""Update TestGateway
Update attributes of TestGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_test_gateway_by_id_with_http_info(test_gateway_id, test_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str test_gateway_id: ID of testGateway to update. (required)
:param TestGateway test_gateway: Attributes of testGateway to update. (required)
:return: TestGateway
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['test_gateway_id', 'test_gateway']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'test_gateway_id' is set
if ('test_gateway_id' not in params or
params['test_gateway_id'] is None):
raise ValueError("Missing the required parameter `test_gateway_id` when calling `update_test_gateway_by_id`")
# verify the required parameter 'test_gateway' is set
if ('test_gateway' not in params or
params['test_gateway'] is None):
raise ValueError("Missing the required parameter `test_gateway` when calling `update_test_gateway_by_id`")
collection_formats = {}
path_params = {}
if 'test_gateway_id' in params:
path_params['testGatewayId'] = params['test_gateway_id']
header_params = {}
| |
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import pandas as pd
import numpy as np
EXAMPLE_ROWS =5
# Provide a stand-in `api` object when the hosting runtime (SAP Data
# Intelligence) did not inject one, i.e. when running this operator locally.
try:
    api
except NameError:
    class api:
        # Minimal local mock of the Data Intelligence operator API surface
        # used by this operator: Message, send, call, set_port_callback, config.

        class Message:
            # Message container: payload in `body`, metadata in `attributes`.
            def __init__(self,body = None,attributes = ""):
                self.body = body
                self.attributes = attributes

        def send(port,msg) :
            # Local substitute for api.send: prints the message instead of
            # routing it to an output port; returns the message unchanged.
            if isinstance(msg,api.Message) :
                print('Port: ', port)
                print('Attributes: ', msg.attributes)
                print('Body: ', str(msg.body))
            else :
                print(str(msg))
            return msg

        def call(config,msg):
            # Installs the given config on the mock api and runs process()
            # once with the supplied message, returning its result.
            api.config = config
            return process(msg)

        def set_port_callback(port, callback) :
            # Simulates the runtime port registration by invoking the
            # callback immediately with a small example DataFrame message.
            df = pd.DataFrame(
                {'icol': [1, 2, 3, 4, 5], 'col 2': [1, 2, 3, 4, 5], 'col3': [100, 200, 300, 400, 500]})
            attributes = {'format': 'csv', 'name': 'DF_name'}
            default_msg = api.Message(attributes=attributes,body = df)
            callback(default_msg)

        class config:
            ## Meta data
            # config_params collects the UI metadata (title/description/type)
            # for every operator parameter declared below.
            config_params = dict()
            version = '0.0.17'
            tags = {'pandas': ''}
            operator_description = "Cleanse Heuristics"
            operator_description_long = "A couple of heuristics collected into one operator for cleansing data in a DataFrame."
            add_readme = {}
            # Each parameter follows the pattern: default value first, then
            # its config_params metadata entry.
            value_to_nan = 'None'
            config_params['value_to_nan'] = {'title': 'Set value to Null/Nan', 'description': 'Sets all data of categorical columns with value to nan', 'type': 'string'}
            yes_no_to_num = False
            config_params['yes_no_to_num'] = {'title': 'Yes/No to Numeric', 'description': 'Yes/No to Numeric 1/0', 'type': 'boolean'}
            drop_nan_columns = False
            config_params['drop_nan_columns'] = {'title': 'Drops columns when all values are NaN', 'description': 'Drop NaN Columns', 'type': 'boolean'}
            all_constant_to_NaN = False
            config_params['all_constant_to_NaN'] = {'title': 'Columns with unique Value to NaN', 'description': 'Columns with unique Value to NaN', 'type': 'boolean'}
            threshold_unique = 0
            config_params['threshold_unique'] = {'title': 'Threshold of unique values', 'description': 'Threshold of unique values set to 1 (-> value exist) ', 'type': 'number'}
            threshold_unique_cols = 'None'
            config_params['threshold_unique_cols'] = {'title': 'Columns for unique threshold criteria', 'description': 'Columns for unique threshold criteria', 'type': 'string'}
            sparse = 0
            config_params['sparse'] = {'title': 'Sparse ', 'description': 'Absolute or relative number criteria of sparsenss. All values of column are set to nan', 'type': 'number'}
            sparse_cols = 'None'
            config_params['sparse_cols'] = {'title': 'Columns for check on sparse', 'description': 'Columns for check on sparse', 'type': 'string'}
            drop_nan_rows_cols = 'None'
            config_params['drop_nan_rows_cols'] = {'title': 'Drop NaN rows columns', 'description': 'Columns for dropping NaN rows ', 'type': 'string'}
            rare_value_quantile = 0
            config_params['rare_value_quantile'] = {'title': 'Rare Value Quantile', 'description': 'Rare Value Quantile', 'type': 'number'}
            rare_value_cols = 'None'
            config_params['rare_value_cols'] = {'title': 'Columns for Rare Value Criteria', 'description': 'Columns for Rare Value Criteria', 'type': 'string'}
            rare_value_std = None
            config_params['rare_value_std'] = {'title': 'Rare Value Standard Deviation', 'description': 'Rare Value Standard Deviation', 'type': 'number'}
            max_cat_num = 0
            config_params['max_cat_num'] = {'title': 'Maximum Number of Categories', 'description': 'Maximum Number of Categories', 'type': 'number'}
            max_cat_num_cols = 'None'
            config_params['max_cat_num_cols'] = {'title': 'Columns for Maximum Number Categories Criteria', 'description': 'Columns for Maximum Number Categories Criteria', 'type': 'string'}
            reduce_categoricals_only = False
            config_params['reduce_categoricals_only'] = {'title': 'Reduce Categorical Type Columns only', 'description': 'Reduce Categorical Type Columns only', 'type': 'boolean'}
            remove_duplicates_cols = 'None'
            config_params['remove_duplicates_cols'] = {'title': 'Columns for Remove Duplicate Criteria', 'description': 'Columns for Remove Duplicate Criteria', 'type': 'string'}
            fill_categoricals_nan = 'None'
            config_params['fill_categoricals_nan'] = {'title': 'Value to replace NaN', 'description': 'Value that replaces NaN for categorical columns', 'type': 'string'}
            fill_numeric_nan_zero = False
            config_params['fill_numeric_nan_zero'] = {'title': 'Replaces numeric type columns nan with 0',
                                                      'description': 'Replaces numeric type columns nan with 0',
                                                      'type': 'boolean'}
            # Parameter kept but disabled by the author ("under construction"):
            #cut_obj_size = 0
            #config_params['cut_obj_size'] = {'title': 'Cut object siez', 'description': 'Truncate lengthy strings to this size', 'type': 'number'}
def process(msg) :
logger, log_stream = slog.set_logging('DEBUG')
# start custom process definition
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
raise TypeError('Message body does not contain a pandas DataFrame')
att_dict = dict()
att_dict['config'] = dict()
###### start of doing calculation
att_dict['prev_number_columns'] = df.shape[1]
att_dict['prev_number_rows'] = df.shape[0]
#
att_dict['config']['remove_duplicates_cols'] = api.config.remove_duplicates_cols
remove_duplicates_cols = tfp.read_list(api.config.remove_duplicates_cols)
if remove_duplicates_cols:
df = df.groupby(remove_duplicates_cols).first().reset_index()
logger.debug('#Dropped duplicates: {} - {} = {}'.format(att_dict['prev_number_rows'], df.shape[0], \
att_dict['prev_number_rows'] - df.shape[0]))
att_dict['config']['value_to_nan'] = api.config.value_to_nan
value_to_nan = tfp.read_value(api.config.value_to_nan)
if value_to_nan:
df.select_dtypes(include='object').replace(value_to_nan, value_to_nan.nan, inplace=True)
att_dict['config']['yes_no_to_boolean'] = str(api.config.yes_no_to_num)
if api.config.yes_no_to_num:
prev_categoricals = len(df.select_dtypes(include=np.object).columns)
for col in df.select_dtypes(include=np.object):
df[col] = df[col].str.upper()
vals = [x for x in df.loc[df[col].notnull(), col].unique()]
if len(vals) == 1 and vals[0] in ['YES', 'Y']:
df.loc[df[col].notnull(), col] = 1
df.loc[df[col].isnull(), col] = 0
try:
df[col] = df[col].astype('int8')
except ValueError:
print('Value Error: {}'.format(col))
print(df[col].unique())
if len(vals) == 1 and vals[0] in ['NO', 'N']:
df.loc[df[col].notnull(), col] = 1
df.loc[df[col].isnull(), col] = 0
df[col] = df[col].astype('int8')
if len(vals) == 2 and (all(i in vals for i in ['YES', 'NO']) or all(i in vals for i in ['Y', 'N'])):
df[col].replace(to_replace={'NO': 0, 'N': 0, 'no': 0, 'n': 0, 'YES': 1, 'Y': 1, 'yes': 1, 'y': 1})
df[col] = df[col].astype('int8')
after_categoricals = len(df.select_dtypes(include=np.object).columns)
logger.debug('<yes_no_to_boolean> impact: {} -> {}'.format(prev_categoricals, after_categoricals))
att_dict['config']['all_constant_to_NaN'] = str(api.config.all_constant_to_NaN)
if api.config.all_constant_to_NaN:
num_constant_cols = 0
for col in df.columns:
unique_vals = df[col].unique()
if len(unique_vals) == 1:
df[col] = np.nan
num_constant_cols = num_constant_cols + 1
logger.debug('<all_constant_to_NaN> number of columns: {}'.format(num_constant_cols))
# remove rare value rows with quantile
att_dict['config']['rare_value_cols'] = api.config.rare_value_cols
att_dict['config']['rare_value_quantile'] = api.config.rare_value_quantile
att_dict['config']['rare_value_std'] = api.config.rare_value_std
rare_value_cols = tfp.read_list(api.config.rare_value_cols, list(df.columns))
if rare_value_cols:
logger.debug('quantile')
# drop rare values by quantile
if api.config.rare_value_quantile > 0:
if not api.config.rare_value_quantile >= 0 and api.config.rare_value_quantile < 1:
raise ValueError('Quantile value range: [0,1[, not {}'.format(api.config.rare_value_quantile))
num_reduce_categoricals_col = 0
for col in rare_value_cols:
unique_num = len(df[col].unique())
val_num = df[col].count()
ratio = df[col].count() / len(df[col].unique())
threshold = df[col].count() / len(df[col].unique()) * api.config.rare_value_quantile
value_counts = df[col].value_counts() # Specific column
# kept_values = value_counts[value_counts > threshold].count()
if value_counts[value_counts > threshold].count() > 1:
to_remove = value_counts[value_counts <= threshold].index
if len(to_remove) > 0:
logger.debug(
'Drop rare value by quantile: Column {}: {}/{} '.format(col, len(to_remove), unique_num))
df[col].replace(to_remove, np.nan, inplace=True)
num_reduce_categoricals_col += 1
logger.debug('<rare_value_quantile> impact on columns: {}/{}'.format(num_reduce_categoricals_col,
len(rare_value_cols)))
# drop rare values by std
if api.config.rare_value_std > 0:
num_reduce_categoricals_col = 0
for col in df.columns:
unique_num = len(df[col].unique())
value_counts = df[col].value_counts()
mean = value_counts.mean()
threshold = value_counts.mean() - value_counts.std() * api.config.rare_value_std
if threshold > 1:
to_remove = value_counts[value_counts <= threshold].index
if len(to_remove) > 0:
logger.debug(
'Drop rare value by std: Column {}: {}/{} '.format(col, len(to_remove), unique_num))
df[col].replace(to_remove, np.nan, inplace=True)
num_reduce_categoricals_col += 1
logger.debug(
'<rare_value_std> impact on columns: {}/{}'.format(num_reduce_categoricals_col, len(rare_value_cols)))
# for unique values less then threshold_unique set to 1. All NaN set to 0
att_dict['config']['threshold_unique_cols'] = api.config.threshold_unique_cols
att_dict['config']['threshold_unique'] = api.config.threshold_unique
threshold_unique_cols = tfp.read_list(api.config.threshold_unique_cols, list(df.columns))
if threshold_unique_cols:
prev_obj_cols = len(df.select_dtypes("object"))
for col in threshold_unique_cols:
if df[col].dtype == np.object:
unique_vals = list(df[col].unique())
if len(unique_vals) <= api.config.threshold_unique:
# test if one of the values is nan
if np.nan in unique_vals:
df.loc[df[col].notnull(), col] = 1
df.loc[df[col].isnull(), col] = 0
df[col] = df[col].astype('int8')
after_obj_cols = len(df.select_dtypes("object"))
logger.debug(
'Threshold unique effect on number of categorical columns: {} -> {}'.format(prev_obj_cols, after_obj_cols))
# for count values less then threshold_count set to NaN
att_dict['config']['sparse_cols'] = api.config.sparse_cols
att_dict['config']['sparse'] = api.config.sparse
sparse_cols = tfp.read_list(api.config.sparse_cols)
if sparse_cols:
logger.debug('Sparse check')
if api.config.reduce_categoricals_only:
test_cols = [ot for ot in sparse_cols if df[ot].dtype == np.object]
if api.config.sparse < 1:
api.config.sparse = api.config.sparse * df.shape[0]
for col in sparse_cols:
if df[col].count() < api.config.sparse_freq:
logger.debug('Threshold_count: Removed column {} (#values {})'.format(col, df[col].count()))
df[col] = np.nan
# removes columns with to many category values that could not be transposed
att_dict['config']['max_cat_num'] = api.config.max_cat_num
att_dict['config']['max_cat_num_cols'] = api.config.max_cat_num_cols
max_cat_num_cols = tfp.read_list(api.config.max_cat_num_cols)
if api.config.max_cat_num > 0 and max_cat_num_cols:
drop_cols = list()
for col in max_cat_num_cols:
if df[col].dtype == np.object:
if len(df[col].unique()) > api.config.max_cat_num:
drop_cols.append(col)
df.drop(columns=drop_cols, inplace=True)
# remove cols with only NaN
att_dict['config']['drop_nan_columns'] = api.config.drop_nan_columns
if api.config.drop_nan_columns:
df.dropna(axis='columns', how='all', inplace=True)
# remove rows with NAN except for dimension cols
att_dict['config']['drop_nan_rows_cols'] = api.config.drop_nan_rows_cols
drop_nan_rows_cols = tfp.read_list(api.config.drop_nan_rows_cols, df.columns)
if drop_nan_rows_cols:
prev_row_num = df.shape[0]
df[drop_nan_rows_cols].dropna(subset=drop_nan_rows_cols, how='all', inplace=True)
logger.debug('<drop_nan_rows_cols> deleted rows: {}/{}'.format(prev_row_num - df.shape[0], prev_row_num))
# maps a certain value to nan for all object type columns
if tfp.read_value(api.config.fill_categoricals_nan):
cat_cols = df.select_dtypes(include='object')
for col in cat_cols:
df[col].fillna(value=api.config.fill_categoricals_nan, inplace=True)
# im construction error-prone and ugly
#if api.config.cut_obj_size > 0:
# cols_obj = df.select_dtypes(include='object')
# dict_mapping = dict()
# for col in cols_obj:
# if df[col].str.len().max() > api.config.cut_obj_size:
# catmap = dict(enumerate(df[col].unique()))
# valmap = {val: val[:api.config.cut_obj_size - 3] + '_' + str(cat) for cat, val in catmap.items()}
# if len(api.config.fill_categoricals_nan) > 0:
# if api.config.fill_categoricals_nan in valmap.keys():
# valmap[api.config.fill_categoricals_nan] = api.config.fill_categoricals_nan
# df[col] = df[col].map(valmap) # problem
# df[col].str.replace(r'[,\.:;]', '')
# | |
<filename>angr/project.py
import logging
import os
import types
from io import BytesIO, IOBase
import pickle
import string
from collections import defaultdict
from typing import Dict, Any
import archinfo
from archinfo.arch_soot import SootAddressDescriptor, ArchSoot
import cle
from .misc.ux import deprecated
from .errors import AngrNoPluginError
l = logging.getLogger(name=__name__)
def load_shellcode(shellcode, arch, start_offset=0, load_address=0, thumb=False, **kwargs):
    """
    Load a new project based on a snippet of assembly or bytecode.

    :param shellcode:    The data to load, as either a bytestring of instructions or a string of assembly text
    :param arch:         The name of the arch to use, or an archinfo class
    :param start_offset: The offset into the data to start analysis (default 0)
    :param load_address: The address to place the data in memory (default 0)
    :param thumb:        Whether this is ARM Thumb shellcode
    :param kwargs:       Any additional keyword arguments are forwarded to the ``Project`` constructor.
    :return:             The newly created ``Project``.
    """
    if not isinstance(arch, archinfo.Arch):
        arch = archinfo.arch_from_id(arch)
    # isinstance instead of `type(...) is str` so str subclasses are
    # also treated as assembly text
    if isinstance(shellcode, str):
        shellcode = arch.asm(shellcode, load_address, thumb=thumb)
    if thumb:
        # Thumb mode is signalled by setting the low bit of the entry point
        start_offset |= 1
    return Project(
        BytesIO(shellcode),
        main_opts={
            'backend': 'blob',
            'arch': arch,
            'entry_point': start_offset,
            'base_addr': load_address,
        },
        **kwargs
    )
class Project:
"""
This is the main class of the angr module. It is meant to contain a set of binaries and the relationships between
them, and perform analyses on them.
:param thing: The path to the main executable object to analyze, or a CLE Loader object.
The following parameters are optional.
:param default_analysis_mode: The mode of analysis to use by default. Defaults to 'symbolic'.
:param ignore_functions: A list of function names that, when imported from shared libraries, should
never be stepped into in analysis (calls will return an unconstrained value).
:param use_sim_procedures: Whether to replace resolved dependencies for which simprocedures are
available with said simprocedures.
:param exclude_sim_procedures_func: A function that, when passed a function name, returns whether or not to wrap
it with a simprocedure.
:param exclude_sim_procedures_list: A list of functions to *not* wrap with simprocedures.
:param arch: The target architecture (auto-detected otherwise).
:param simos: a SimOS class to use for this project.
:param engine: The SimEngine class to use for this project.
:param bool translation_cache: If True, cache translated basic blocks rather than re-translating them.
:param support_selfmodifying_code: Whether we aggressively support self-modifying code. When enabled, emulation
will try to read code from the current state instead of the original memory,
regardless of the current memory protections.
:type support_selfmodifying_code: bool
:param store_function: A function that defines how the Project should be stored. Default to pickling.
:param load_function: A function that defines how the Project should be loaded. Default to unpickling.
:param analyses_preset: The plugin preset for the analyses provider (i.e. Analyses instance).
:type analyses_preset: angr.misc.PluginPreset
Any additional keyword arguments passed will be passed onto ``cle.Loader``.
:ivar analyses: The available analyses.
:type analyses: angr.analysis.Analyses
:ivar entry: The program entrypoint.
:ivar factory: Provides access to important analysis elements such as path groups and symbolic execution results.
:type factory: AngrObjectFactory
:ivar filename: The filename of the executable.
:ivar loader: The program loader.
:type loader: cle.Loader
:ivar storage: Dictionary of things that should be loaded/stored with the Project.
:type storage: defaultdict(list)
"""
def __init__(self, thing,
             default_analysis_mode=None,
             ignore_functions=None,
             use_sim_procedures=True,
             exclude_sim_procedures_func=None,
             exclude_sim_procedures_list=(),
             arch=None, simos=None,
             engine=None,
             load_options: Dict[str, Any]=None,
             translation_cache=True,
             support_selfmodifying_code=False,
             store_function=None,
             load_function=None,
             analyses_preset=None,
             concrete_target=None,
             **kwargs):
    """Load the binary, resolve architecture and guest OS, and set up the
    project's hubs (factory, analyses, knowledge base).

    See the class docstring for the parameter documentation.
    """
    # Step 1: Load the binary
    if load_options is None: load_options = {}
    # any extra keyword arguments are treated as additional CLE options
    load_options.update(kwargs)
    if arch is not None:
        load_options.update({'arch': arch})
    if isinstance(thing, cle.Loader):
        # a pre-built loader: adopt it as-is (extra CLE options are ignored)
        if load_options:
            l.warning("You provided CLE options to angr but you also provided a completed cle.Loader object!")
        self.loader = thing
        self.filename = self.loader.main_object.binary
    elif hasattr(thing, 'read') and hasattr(thing, 'seek'):
        # a file-like stream: load it directly, no filename available
        l.info("Loading binary from stream")
        self.filename = None
        self.loader = cle.Loader(thing, **load_options)
    elif not isinstance(thing, str) or not os.path.exists(thing) or not os.path.isfile(thing):
        raise Exception("Not a valid binary file: %s" % repr(thing))
    else:
        # use angr's loader, provided by cle
        l.info("Loading binary %s", thing)
        self.filename = thing
        self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options)
    # Step 2: determine its CPU architecture, ideally falling back to CLE's guess
    if isinstance(arch, str):
        self.arch = archinfo.arch_from_id(arch)  # may raise ArchError, let the user see this
    elif isinstance(arch, archinfo.Arch):
        self.arch = arch  # type: archinfo.Arch
    elif arch is None:
        # no explicit arch: trust what CLE detected for the main object
        self.arch = self.loader.main_object.arch
    else:
        raise ValueError("Invalid arch specification.")
    # Step 3: Set some defaults and set the public and private properties
    if not default_analysis_mode:
        default_analysis_mode = 'symbolic'
    if not ignore_functions:
        ignore_functions = []
    if isinstance(exclude_sim_procedures_func, types.LambdaType):
        # lambdas cannot be pickled, so this breaks store_function/load_function
        l.warning("Passing a lambda type as the exclude_sim_procedures_func argument to "
                  "Project causes the resulting object to be un-serializable.")
    self._sim_procedures = {}
    self.concrete_target = concrete_target
    # It doesn't make any sense to have auto_load_libs
    # if you have the concrete target, let's warn the user about this.
    if self.concrete_target and load_options.get('auto_load_libs', None):
        l.critical("Incompatible options selected for this project, please disable auto_load_libs if "
                   "you want to use a concrete target.")
        raise Exception("Incompatible options for the project")
    # concrete execution is only implemented for these architectures
    if self.concrete_target and self.arch.name not in ['X86', 'AMD64', 'ARMHF']:
        l.critical("Concrete execution does not support yet the selected architecture. Aborting.")
        raise Exception("Incompatible options for the project")
    self._default_analysis_mode = default_analysis_mode
    self._exclude_sim_procedures_func = exclude_sim_procedures_func
    self._exclude_sim_procedures_list = exclude_sim_procedures_list
    self.use_sim_procedures = use_sim_procedures
    self._ignore_functions = ignore_functions
    self._support_selfmodifying_code = support_selfmodifying_code
    self._translation_cache = translation_cache
    self._executing = False  # this is a flag for the convenience API, exec() and terminate_execution() below
    if self._support_selfmodifying_code:
        # cached IRSBs would go stale when the code rewrites itself
        if self._translation_cache is True:
            self._translation_cache = False
            l.warning("Disabling IRSB translation cache because support for self-modifying code is enabled.")
    self.entry = self.loader.main_object.entry
    self.storage = defaultdict(list)
    self.store_function = store_function or self._store
    self.load_function = load_function or self._load
    # Step 4: Set up the project's hubs
    # Step 4.1 Factory
    self.factory = AngrObjectFactory(self, default_engine=engine)
    # Step 4.2: Analyses
    self._analyses_preset = analyses_preset
    self.analyses = None
    self._initialize_analyses_hub()
    # Step 4.3: ...etc
    self.kb = KnowledgeBase(self, name="global")
    # Step 5: determine the guest OS
    if isinstance(simos, type) and issubclass(simos, SimOS):
        self.simos = simos(self) #pylint:disable=invalid-name
    elif isinstance(simos, str):
        self.simos = os_mapping[simos](self)
    elif simos is None:
        # fall back to the OS CLE detected for the main object
        self.simos = os_mapping[self.loader.main_object.os](self)
    else:
        raise ValueError("Invalid OS specification or non-matching architecture.")
    self.is_java_project = isinstance(self.arch, ArchSoot)
    self.is_java_jni_project = isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support
    # Step 6: Register simprocedures as appropriate for library functions
    if isinstance(self.arch, ArchSoot) and self.simos.is_javavm_with_jni_support:
        # If we execute a Java archive that includes native JNI libraries,
        # we need to use the arch of the native simos for all (native) sim
        # procedures.
        sim_proc_arch = self.simos.native_arch
    else:
        sim_proc_arch = self.arch
    for obj in self.loader.initial_load_objects:
        self._register_object(obj, sim_proc_arch)
    # Step 7: Run OS-specific configuration
    self.simos.configure_project()
def _initialize_analyses_hub(self):
    """Create ``self.analyses`` and activate the configured plugin preset,
    falling back to ``'default'`` when no preset was supplied."""
    self.analyses = AnalysesHub(self)
    preset = 'default' if self._analyses_preset is None else self._analyses_preset
    self.analyses.use_plugin_preset(preset)
def _register_object(self, obj, sim_proc_arch):
"""
This scans through an objects imports and hooks them with simprocedures from our library whenever possible
"""
# Step 1: get the set of libraries we are allowed to use to resolve unresolved symbols
missing_libs = []
for lib_name in self.loader.missing_dependencies:
try:
missing_libs.append(SIM_LIBRARIES[lib_name])
except KeyError:
l.info("There are no simprocedures for missing library %s :(", lib_name)
# additionally provide libraries we _have_ loaded as a fallback fallback
# this helps in the case that e.g. CLE picked up a linux arm libc to satisfy an android arm binary
for lib in self.loader.all_objects:
if lib.provides in SIM_LIBRARIES:
simlib = SIM_LIBRARIES[lib.provides]
if simlib not in missing_libs:
missing_libs.append(simlib)
# Step 2: Categorize every "import" symbol in each object.
# If it's IGNORED, mark it for stubbing
# If it's blacklisted, don't process it
# If it matches a simprocedure we have, replace it
for reloc in obj.imports.values():
# Step 2.1: Quick filter on symbols we really don't care about
func = reloc.symbol
if func is None:
continue
if not func.is_function and func.type != cle.backends.symbol.SymbolType.TYPE_NONE:
continue
if func.resolvedby is None:
# I don't understand the binary which made me add this case. If you are debugging and see this comment,
# good luck.
# ref: https://github.com/angr/angr/issues/1782
# (I also don't know why the TYPE_NONE check in the previous clause is there but I can't find a ref for
# that. they are probably related.)
continue
if not reloc.resolved:
# This is a hack, effectively to support Binary Ninja, which doesn't provide access to dependency
# library names. The backend creates the | |
"""Miscellaneous utilities"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
from binascii import b2a_hex
import concurrent.futures
from datetime import datetime, timezone
import random
import errno
import hashlib
from hmac import compare_digest
import inspect
import os
import socket
import sys
import threading
import ssl
import uuid
import warnings
from async_generator import aclosing, asynccontextmanager, async_generator, yield_
from tornado import gen, ioloop, web
from tornado.platform.asyncio import to_asyncio_future
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.log import app_log
def random_port():
    """Get a single random port.

    Binds to an ephemeral port (port 0), reads the number the OS assigned,
    and releases the socket. Note the inherent race: another process may
    claim the port between this call and its eventual use.

    :return: an available TCP port number (int)
    """
    # removed leftover debug cruft; close the socket even if bind() fails
    sock = socket.socket()
    try:
        sock.bind(('', 0))
        return sock.getsockname()[1]
    finally:
        sock.close()
# ISO8601 for strptime with/without milliseconds
ISO8601_ms = '%Y-%m-%dT%H:%M:%S.%fZ'  # e.g. 2020-01-02T03:04:05.678901Z
ISO8601_s = '%Y-%m-%dT%H:%M:%SZ'  # e.g. 2020-01-02T03:04:05Z
def isoformat(dt):
    """Render a datetime object as an ISO 8601 UTC timestamp.

    Naïve datetimes are assumed to already be in UTC; aware datetimes
    are converted. ``None`` passes through unchanged so callers need not
    special-case null timestamps.
    """
    if dt is None:
        return None
    naive_utc = dt.astimezone(timezone.utc).replace(tzinfo=None) if dt.tzinfo else dt
    return naive_utc.isoformat() + 'Z'
def can_connect(ip, port):
    """Check if we can connect to an ip:port.

    Return True if we can connect, False otherwise.
    """
    # unspecified addresses are probed via loopback
    if ip in {'', '0.0.0.0'}:
        ip = '127.0.0.1'
    try:
        socket.create_connection((ip, port)).close()
        return True
    except socket.error as e:
        # refused / timed out just means the server isn't up (yet);
        # anything else is unexpected and worth logging
        if e.errno not in {errno.ECONNREFUSED, errno.ETIMEDOUT}:
            app_log.error("Unexpected error connecting to %s:%i %s", ip, port, e)
        return False
def make_ssl_context(
        keyfile, certfile, cafile=None,
        verify=True, check_hostname=True):
    """Build an SSL context for starting an https server or making requests over ssl.

    Returns ``None`` when either the key file or the cert file is not
    given, i.e. SSL is not configured.
    """
    if not (keyfile and certfile):
        return None
    # verify=True -> we authenticate a server; otherwise we are the
    # server authenticating clients
    purpose = ssl.Purpose.SERVER_AUTH if verify else ssl.Purpose.CLIENT_AUTH
    context = ssl.create_default_context(purpose, cafile=cafile)
    context.load_cert_chain(certfile, keyfile)
    context.check_hostname = check_hostname
    return context
async def exponential_backoff(
        pass_func,
        fail_message,
        start_wait=0.2,
        scale_factor=2,
        max_wait=5,
        timeout=10,
        timeout_tolerance=0.1,
        *args, **kwargs):
    """
    Exponentially backoff until `pass_func` is true.
    The `pass_func` function will wait with **exponential backoff** and
    **random jitter** for as many needed iterations of the Tornado loop,
    until reaching maximum `timeout` or truthiness. If `pass_func` is still
    returning false at `timeout`, a `TimeoutError` will be raised.
    The first iteration will begin with a wait time of `start_wait` seconds.
    Each subsequent iteration's wait time will scale up by continuously
    multiplying itself by `scale_factor`. This continues for each iteration
    until `pass_func` returns true or an iteration's wait time has reached
    the `max_wait` seconds per iteration.
    `pass_func` may be a future, although that is not entirely recommended.
    Parameters
    ----------
    pass_func
        function that is to be run
    fail_message : str
        message for a `TimeoutError`
    start_wait : optional
        initial wait time for the first iteration in seconds
    scale_factor : optional
        a multiplier to increase the wait time for each iteration
    max_wait : optional
        maximum wait time per iteration in seconds
    timeout : optional
        maximum time of total wait in seconds
    timeout_tolerance : optional
        a small multiplier used to add jitter to `timeout`'s deadline
    *args, **kwargs
        passed to `pass_func(*args, **kwargs)`
    Returns
    -------
    value of `pass_func(*args, **kwargs)`
    Raises
    ------
    TimeoutError
        If `pass_func` is still false at the end of the `timeout` period.
    Notes
    -----
    See https://www.awsarchitectureblog.com/2015/03/backoff.html
    for information about the algorithm and examples. We're using their
    full Jitter implementation equivalent.
    """
    loop = ioloop.IOLoop.current()
    deadline = loop.time() + timeout
    # add jitter to the deadline itself to prevent re-align of a bunch of
    # timing out calls once the deadline is reached.
    if timeout_tolerance:
        tol = timeout_tolerance * timeout
        deadline = random.uniform(deadline - tol, deadline + tol)
    # `scale` is the per-iteration multiplier on start_wait; it grows by
    # scale_factor after every failed attempt.
    scale = 1
    while True:
        # NOTE(review): maybe_future is defined elsewhere in this module —
        # presumably it wraps plain values/coroutines into awaitables.
        ret = await maybe_future(pass_func(*args, **kwargs))
        # Truthy!
        if ret:
            return ret
        remaining = deadline - loop.time()
        if remaining < 0:
            # timeout exceeded
            break
        # add some random jitter to improve performance
        # this prevents overloading any single tornado loop iteration with
        # too many things
        # (full-jitter: wait is uniform in [0, current backoff], capped by
        # max_wait and by the time remaining until the deadline)
        dt = min(max_wait, remaining, random.uniform(0, start_wait * scale))
        scale *= scale_factor
        await gen.sleep(dt)
    raise TimeoutError(fail_message)
async def wait_for_server(ip, port, timeout=10):
    """Block until some server accepts TCP connections at ip:port."""
    # wildcard/empty addresses are probed via loopback
    target = '127.0.0.1' if ip in {'', '0.0.0.0'} else ip
    message = "Server at {ip}:{port} didn't respond in {timeout} seconds".format(
        ip=target, port=port, timeout=timeout)
    await exponential_backoff(
        lambda: can_connect(target, port),
        message,
        timeout=timeout,
    )
async def wait_for_http_server(url, timeout=10, ssl_context=None):
    """Wait until an HTTP server answers at `url`.

    Any non-5XX response code counts as "up" — even a 404.
    Returns the last response obtained from the server.
    """
    loop = ioloop.IOLoop.current()
    tic = loop.time()
    client = AsyncHTTPClient()
    if ssl_context:
        client.ssl_options = ssl_context

    async def probe():
        # single fetch attempt; a truthy return stops the backoff loop
        try:
            return await client.fetch(url, follow_redirects=False)
        except HTTPError as err:
            if err.code >= 500:
                # still failing; 599 means "no connection yet" and is
                # expected, but other 5XX (e.g. a proxy's 502) get a warning
                if err.code != 599:
                    app_log.warning(
                        "Server at %s responded with error: %s", url, err.code)
            else:
                # any non-5XX response means the server is up
                app_log.debug("Server at %s responded with %s", url, err.code)
                return err.response
        except (OSError, socket.error) as err:
            if err.errno not in {errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET}:
                app_log.warning("Failed to connect to %s (%s)", url, err)
            return False

    response = await exponential_backoff(
        probe,
        "Server at {url} didn't respond in {timeout} seconds".format(url=url, timeout=timeout),
        timeout=timeout
    )
    return response
# Decorators for authenticated Handlers
def auth_decorator(check_auth):
    """Make an authentication decorator.

    Given ``check_auth(handler)`` — which should raise on failure — build a
    decorator that runs the check before invoking the wrapped method.

    I heard you like decorators, so I put a decorator
    in your decorator, so you can decorate while you decorate.
    """
    from functools import wraps

    def decorator(method):
        @wraps(method)
        def decorated(self, *args, **kwargs):
            check_auth(self)
            return method(self, *args, **kwargs)
        return decorated

    # mirror the check's identity on the decorator itself; wraps() copies
    # __name__/__doc__ like the original hand-rolled version did, and also
    # __module__, __qualname__ and __wrapped__ for better introspection
    return wraps(check_auth)(decorator)
@auth_decorator
def token_authenticated(self):
    """Decorator for method authenticated only by Authorization token header

    (no cookies)

    Raises a 403 when no user could be resolved from the token header.
    """
    if self.get_current_user_token() is None:
        raise web.HTTPError(403)
@auth_decorator
def authenticated_403(self):
    """Decorator for method to raise 403 error instead of redirect to login

    Like tornado.web.authenticated, this decorator raises a 403 error
    instead of redirecting to login.
    """
    # no resolved user at all -> forbidden (rather than a login redirect)
    if self.current_user is None:
        raise web.HTTPError(403)
@auth_decorator
def admin_only(self):
    """Decorator for restricting access to admin users

    Raises 403 unless the current user exists and has the admin flag set.
    """
    user = self.current_user
    if user is None or not user.admin:
        raise web.HTTPError(403)
@auth_decorator
def metrics_authentication(self):
    """Decorator for restricting access to metrics

    Metrics stay public unless `authenticate_prometheus` is enabled; then an
    authenticated user is required (any user — no admin check here).
    """
    user = self.current_user
    if user is None and self.authenticate_prometheus:
        raise web.HTTPError(403)
# Token utilities
def new_token(*args, **kwargs):
    """Return a fresh random token string.

    Currently a 32-character lowercase hex string (a random UUID4).
    Positional/keyword arguments are accepted for call-compatibility and
    ignored.
    """
    token = uuid.uuid4()
    return token.hex
def hash_token(token, salt=8, rounds=16384, algorithm='sha512'):
    """Hash a token, and return it as ``algorithm:rounds:salt:hash``.

    (The previous docstring advertised ``algorithm:salt:hash``, which did not
    match the actual output format.)

    If `salt` is an integer, a random salt of that many bytes will be used.

    :param token: token string to hash
    :param salt: salt string/bytes, or an int byte-count for a random salt
    :param rounds: how many times the token is fed into the hash
    :param algorithm: any algorithm name accepted by ``hashlib.new``
    """
    h = hashlib.new(algorithm)
    if isinstance(salt, int):
        # random salt of `salt` bytes, hex-encoded
        salt = b2a_hex(os.urandom(salt))
    # normalize: `bsalt` is the bytes fed to the hash, `salt` the str
    # embedded in the returned string
    if isinstance(salt, bytes):
        bsalt = salt
        salt = salt.decode('utf8')
    else:
        bsalt = salt.encode('utf8')
    btoken = token.encode('utf8', 'replace')
    h.update(bsalt)
    for _ in range(rounds):
        h.update(btoken)
    digest = h.hexdigest()
    # explicit arguments instead of the fragile format(**locals()) idiom
    return "{}:{}:{}:{}".format(algorithm, rounds, salt, digest)
def compare_token(compare, token):
    """Compare a token with a stored ``algorithm:rounds:salt:hash`` string.

    Re-hashes `token` with the algorithm, rounds and salt recovered from
    `compare`, then compares with a timing-attack-safe comparison.

    :returns: True if the token matches, False otherwise
    """
    algorithm, srounds, salt, _ = compare.split(':')
    hashed = hash_token(
        token, salt=salt, rounds=int(srounds), algorithm=algorithm
    ).encode('utf8')
    # compare_digest avoids leaking information via comparison timing;
    # return its boolean result directly (no if/return-True/return-False)
    return compare_digest(compare.encode('utf8'), hashed)
def url_path_join(*pieces):
    """Join components of a url into a relative url with single slashes.

    A leading slash on the first piece and a trailing slash on the last
    piece are preserved; duplicate slashes at the joints are collapsed.
    Copied from `notebook.utils.url_path_join`.
    """
    lead = '/' if pieces[0].startswith('/') else ''
    trail = '/' if pieces[-1].endswith('/') else ''
    middle = '/'.join(p.strip('/') for p in pieces if p.strip('/'))
    joined = lead + middle + trail
    # all-empty pieces with lead+trail collapse to a single slash
    return '/' if joined == '//' else joined
def print_ps_info(file=sys.stderr):
    """Print a two-line CPU / memory / FD / thread summary for this process.

    Emits a warning and prints nothing when psutil is not installed.
    """
    try:
        import psutil
    except ImportError:
        # nothing to print without psutil
        warnings.warn(
            "psutil unavailable. Install psutil to get CPU and memory stats",
            stacklevel=2
        )
        return

    proc = psutil.Process()

    # CPU: integer at >= 10%, one decimal place below that
    cpu = proc.cpu_percent(0.1)
    cpu_s = "%i" % cpu if cpu >= 10 else "%.1f" % cpu

    # memory (resident set), scaled to G/M/k with matching precision
    rss = proc.memory_info().rss
    if rss >= 1e9:
        mem_s = '%.1fG' % (rss / 1e9)
    elif rss >= 1e7:
        mem_s = '%.0fM' % (rss / 1e6)
    elif rss >= 1e6:
        mem_s = '%.1fM' % (rss / 1e6)
    else:
        mem_s = '%.0fk' % (rss / 1e3)

    fd_s = str(proc.num_fds())

    # shrink-to-fit column widths; minimums keep headers aligned
    widths = (max(len(cpu_s), 4), max(len(mem_s), 3), max(len(fd_s), 3))
    header = ('%CPU', 'MEM', 'FDs')
    values = (cpu_s, mem_s, fd_s)
    print(' '.join(c.ljust(w) for c, w in zip(header, widths)) + ' threads',
          file=file)
    print(' '.join(c.ljust(w) for c, w in zip(values, widths)) + ' ' +
          str(proc.num_threads()).ljust(7), file=file)
    # trailing blank line
    print('', file=file)
def print_stacks(file=sys.stderr):
| |
the given dimension, the number of
dimensions decreases by one. The dimension properties (name, type, labels, reference system and
resolution) for all other dimensions remain unchanged.
"""
return reduce_dimension(data=self, reducer=reducer, dimension=dimension, context=context)
def reduce_dimension_binary(self, reducer, dimension, context=UNSET) -> 'ProcessBuilder':
    """
    Reduce dimensions using binary reduction

    Delegates to the module-level ``reduce_dimension_binary`` process with this
    cube as ``data``.

    :param self: A data cube.
    :param reducer: A reduction operator applied consecutively on pairs of values. It must be both
        associative and commutative, as execution may be parallelized and the order of execution is
        therefore arbitrary. May be a single process such as ``multiply()`` or consist of multiple
        sub-processes.
    :param dimension: The name of the dimension over which to reduce. Fails with a
        `DimensionNotAvailable` error if the specified dimension does not exist.
    :param context: Additional data to be passed to the reducer.

    :return: A data cube with the newly computed values. The given dimension is dropped, so the number
        of dimensions decreases by one; all other dimension properties (name, type, labels, reference
        system and resolution) remain unchanged.
    """
    return reduce_dimension_binary(data=self, reducer=reducer, dimension=dimension, context=context)
def reduce_spatial(self, reducer, context=UNSET) -> 'ProcessBuilder':
    """
    Reduce spatial dimensions 'x' and 'y'

    Delegates to the module-level ``reduce_spatial`` process with this cube as
    ``data``.

    :param self: A data cube.
    :param reducer: A reducer to apply on the horizontal spatial dimensions. A reducer is a single
        process such as ``mean()`` or a set of processes, which computes a single value for a list of
        values (see the category 'reducer' for such processes).
    :param context: Additional data to be passed to the reducer.

    :return: A data cube with the newly computed values. The horizontal spatial dimensions are dropped,
        so the number of dimensions decreases by two; all other dimension properties (name, type,
        labels, reference system and resolution) remain unchanged.
    """
    return reduce_spatial(data=self, reducer=reducer, context=context)
def rename_dimension(self, source, target) -> 'ProcessBuilder':
    """
    Rename a dimension

    Delegates to the module-level ``rename_dimension`` process with this cube
    as ``data``.

    :param self: The data cube.
    :param source: The current name of the dimension. Fails with a `DimensionNotAvailable` exception
        if the specified dimension does not exist.
    :param target: A new name for the dimension. Fails with a `DimensionExists` exception if a
        dimension with the specified name exists.

    :return: A data cube with the same dimensions, but with one dimension renamed; the old name can no
        longer be referred to. All dimension properties (name, type, labels, reference system and
        resolution) remain unchanged.
    """
    return rename_dimension(data=self, source=source, target=target)
def rename_labels(self, dimension, target, source=UNSET) -> 'ProcessBuilder':
    """
    Rename dimension labels

    Delegates to the module-level ``rename_labels`` process with this cube as
    ``data``.

    :param self: The data cube.
    :param dimension: The name of the dimension to rename the labels for.
    :param target: The new names for the labels. If `source` is not specified, the dimension labels in
        the data cube are expected to be enumerated. If a target dimension label already exists in the
        data cube, a `LabelExists` exception is thrown.
    :param source: The names of the labels as currently present in the data cube — an unsorted,
        potentially incomplete list of labels to rename to the corresponding entries in `target`. If a
        source label doesn't exist, a `LabelNotAvailable` exception is thrown. Defaults to an empty
        array (labels expected to be enumerated).

    :return: The data cube with the same dimensions; only the labels of the given dimension change
        (old labels can no longer be referred to; the number of labels stays the same). All other
        dimension properties remain unchanged.
    """
    return rename_labels(data=self, dimension=dimension, target=target, source=source)
def resample_cube_spatial(self, target, method=UNSET) -> 'ProcessBuilder':
    """
    Resample the spatial dimensions to match a target data cube

    Delegates to the module-level ``resample_cube_spatial`` process with this
    cube as ``data``.

    :param self: A data cube.
    :param target: A data cube that describes the spatial target resolution.
    :param method: Resampling method to use; options align with
        [`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r):
        * `average`: weighted average of all valid pixels * `bilinear` * `cubic` * `cubicspline`
        * `lanczos`: Lanczos windowed sinc * `max` * `med`: median * `min` * `mode`: most frequent
        value * `near`: nearest neighbour (default) * `q1`: first quartile * `q3`: third quartile
        * `rms`: root mean square (quadratic mean) * `sum`: weighted sum of all valid pixels.
        Valid pixels are determined based on the function ``is_valid()``.

    :return: A data cube with the same dimensions. All dimension properties remain unchanged, except
        for the resolution and dimension labels of the spatial dimensions.
    """
    return resample_cube_spatial(data=self, target=target, method=method)
def resample_cube_temporal(self, target, method, dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
    """
    Resample a temporal dimension to match a target data cube

    NOTE(review): a second ``resample_cube_temporal`` with a different
    signature is defined later in this module; being defined last, that one
    shadows this definition at class level. Confirm which is intended to win.

    :param self: A data cube.
    :param target: A data cube that describes the temporal target resolution.
    :param method: A resampling method: a reducer for downsampling or another method for upsampling.
        A reducer is a single process such as ``mean()`` or a set of processes computing a single
        value for a list of values (see the category 'reducer').
    :param dimension: The name of the temporal dimension to resample; must exist with this name in
        both data cubes. If unset or `null`, the data cube must have exactly one temporal dimension
        (`TooManyDimensions` error otherwise; `DimensionNotAvailable` error if the named dimension
        does not exist).
    :param context: Additional data to be passed to the process specified for the parameter `method`.

    :return: A raster data cube with the same dimensions and unchanged dimension properties for all
        non-temporal dimensions. The temporal dimension keeps its name and type, but its reference
        system changes and its labels and resolution may change.
    """
    return resample_cube_temporal(data=self, target=target, method=method, dimension=dimension, context=context)
def resample_cube_temporal(self, target, dimension=UNSET, valid_within=UNSET) -> 'ProcessBuilder':
"""
Resample temporal dimensions to match a target data cube
:param self: A data cube with one or more temporal dimensions.
:param target: A data cube that describes the temporal target resolution.
:param dimension: The name of the temporal dimension to resample, which must exist with this name in
both data cubes. If the dimension is not set or is set to `null`, the process resamples all temporal
dimensions that exist with the same names in both data cubes. The following exceptions may occur: * A
dimension is given, but it does not exist in any of the data cubes: `DimensionNotAvailable` * A
dimension is given, but one of them is not temporal: `DimensionMismatch` * No specific dimension name
is given and there are no temporal dimensions with the same name in the data: `DimensionMismatch`
:param valid_within: Setting this parameter to a numerical value enables that the process searches for
valid values within the given period of days before and after the target timestamps. Valid values are
determined based on the function ``is_valid()``. For example, the limit of `7` for the target
timestamps `2020-01-15 12:00:00` looks for a nearest neighbor after `2020-01-08 12:00:00` and before
`2020-01-22 12:00:00`. If no valid value is found within the given period, the value will be set to no-
data (`null`).
:return: A raster data cube with the same dimensions and the | |
<filename>ruledxml/core.py
#!/usr/bin/env python3
"""
ruledxml.core
-------------
Core implementation for application of rules to XML files.
It covers the following steps:
1. Read rules file
2. Retrieve source XML file
3. Do required elements exist?
4. Apply rules
5. Write resulting XML to file
(C) 2015, meisterluk, BSD 3-clause license
"""
import re
import sys
import os.path
import logging
import pathlib
import argparse
import importlib.machinery
import collections
import lxml.etree
from . import fs
from . import xml
from . import exceptions
def unique_function(filepath: str):
    """Quick&dirty check that every python rule function has a unique name.

    :param filepath: Filepath to a python file
    :type filepath: str
    :raises RuledXmlException: if a function name is defined twice
    """
    # raw string: "\s"/"\w" in a plain string are invalid escapes
    pattern = re.compile(r"def\s+(rule\w+)")
    functions = {}
    errmsg = ("Function name {} is defined multiple times "
              "in file {} (lines {} and {})")
    with open(filepath, encoding="utf-8") as fp:
        # start=1: report human-friendly 1-based line numbers
        # (the original enumerated from 0, so every reported line was off
        # by one)
        for lineno, line in enumerate(fp, 1):
            m = pattern.match(line)
            if not m:
                continue
            name = m.group(1)
            if name in functions:
                first = functions[name]
                msg = errmsg.format(name, filepath, first, lineno)
                raise exceptions.RuledXmlException(msg)
            functions[name] = lineno
            logging.info("Found {} at line {}".format(name, lineno))
def required_exists(dom, nonempty=None, required=None, *, filepath=''):
    """Validate `required` and `nonempty` fields.

    ie. raise InvalidPathException if path does not exist in `dom`.

    :param dom: the root element of a DOM to validate (lxml.etree.Element)
    :param nonempty: set of paths with nonempty values
    :type nonempty: set
    :param required: set of required paths
    :type required: set
    :param filepath: filepath (additional info for error message)
    :type filepath: str
    :raises InvalidPathException: some required path does not exist / is empty
    """
    if not required:
        required = set()
    if not nonempty:
        nonempty = set()
    suffix = ""
    if filepath:
        suffix = " in XML file '{}'".format(filepath)
    for req in required:
        if not dom.xpath(req):
            # BUG FIX: the message was formatted twice (errmsg.format(req)
            # after it had already been formatted), which corrupted messages
            # for paths containing braces
            errmsg = 'Path {} does not exist{}'.format(req, suffix)
            raise exceptions.InvalidPathException(errmsg)
    for req in nonempty:
        if xml.read_source(dom, req) == '':
            errmsg = 'Path {} is empty{}; must contain value'.format(req, suffix)
            raise exceptions.InvalidPathException(errmsg)
def read_rulesfile(filepath: str) -> tuple:
    """Given a `filepath`, return its contained rules and required attributes.

    Raises a exceptions.RuledXmlException if file does not contain any rule.

    :param filepath: A filepath in the filesystem
    :type filepath: str
    :return: rules (associates name to implementation) and metadata
             such as required attributes, xml namespaces and encoding
    :rtype: tuple(dict, dict)
    """
    import importlib.util

    def modulename(path: str) -> str:
        """Return module name for a rule file in given path"""
        return os.path.splitext(os.path.basename(path))[0]

    logging.info('Reading rules from %s', filepath)
    # spec-based loading replaces the deprecated (and since removed)
    # SourceFileLoader.load_module()
    spec = importlib.util.spec_from_file_location(modulename(filepath), filepath)
    rulesfile = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(rulesfile)

    rules = {}
    metadata = {
        'input_required': set(),
        'input_nonempty': set(),
        'input_xml_namespaces': {},
        'output_encoding': 'utf-8',
        'output_xml_namespaces': {}
    }
    tmpl = "Found %s attribute with %d elements"
    for member in dir(rulesfile):
        if member.startswith("rule"):
            rules[member] = getattr(rulesfile, member)
            logging.info("Found %s", member)
        elif member in {"input_required", "input_nonempty",
                        "output_required", "output_nonempty"}:
            metadata[member] = set(getattr(rulesfile, member))
            logging.info(tmpl, member, len(metadata[member]))
        elif member == "input_namespaces":
            metadata['input_xml_namespaces'] = getattr(rulesfile, member)
            logging.info(tmpl, "input_namespaces", len(metadata['input_xml_namespaces']))
        elif member == "output_namespaces":
            metadata['output_xml_namespaces'] = getattr(rulesfile, member)
            logging.info(tmpl, 'output_namespaces', len(metadata['output_xml_namespaces']))
        elif member == "output_encoding":
            metadata['output_encoding'] = getattr(rulesfile, member)
            logging.info('Attribute %s found. Is set to %s', 'output_encoding',
                         metadata['output_encoding'])
    if not rules:
        msg = "Expected at least one rule definition, none given in {}"
        raise exceptions.RuledXmlException(msg.format(filepath))
    logging.debug('metadata found: %s', str(metadata))
    return rules, metadata
def validate_rules(rules: dict):
    """Validate rules. Test whether decorator setup is fine for all of them.

    :param rules: rule names associated to their implementation
    :type rules: dict(str: function)
    :raises RuledXmlException: a decorator is set up wrongfully
    """
    for rulename, rule in rules.items():
        # a rule must have @source, @destination or @foreach applied
        if not hasattr(rule, 'metadata'):
            msg = ("Function {} is considered to be a rule, but it "
                   "requires at least a @destination declaration")
            raise exceptions.InvalidRuleDestination(msg.format(rulename))

        # a rule must have exactly one @destination
        dst = rule.metadata.get('dst', {})
        dst_len = len(dst.get('dests', []))
        if dst_len != 1:
            msg = "A rule must have exactly 1 @destination. {} has {}"
            raise exceptions.TooManyRuleDestinations(msg.format(rulename, dst_len))

        # distinguish: foreach, no-foreach
        if 'each' in rule.metadata:
            each_len = len(rule.metadata['each'])
            if each_len == 0:
                msg = "A @foreach rule requires at least 2 arguments. {} has 0"
                raise exceptions.InvalidRuleForeach(msg.format(rulename))
            each_arg_len = len(rule.metadata['each'][0])
            if each_arg_len != 2:
                msg = "@foreach must have exactly two arguments. {} has {}"
                raise exceptions.InvalidRuleForeach(msg.format(rulename, each_arg_len))
            # BUG FIX: the original read key 'dsts', which is never written
            # anywhere (every other site uses 'dests'), so this check always
            # saw 0 destinations and could never fire
            each_dst_len = len(rule.metadata.get('dst', {}).get('dests', []))
            if each_dst_len > 1:
                msg = ("@foreach rules must have at most "
                       "1 @destination. {} has {}")
                raise exceptions.InvalidRuleForeach(msg.format(rulename, each_dst_len))
            # outer @foreach[0] must be prefix of inner @foreach[0];
            # this depends only on the @foreach chain itself, so check it
            # once (the original repeated it per @source entry, and skipped
            # it entirely for rules without @source)
            prev_base_src = None
            for base_src, _base_dst in rule.metadata['each']:
                if prev_base_src is not None and not base_src.startswith(prev_base_src):
                    msg = ("Outer first @foreach argument '{}' must be prefix of "
                           "inner first @foreach argument '{}'")
                    raise exceptions.InvalidRuleForeach(msg.format(prev_base_src, base_src))
                prev_base_src = base_src
        else:
            pass  # no further checks for plain rules
def build_recursive_structure(rules: list) -> list:
    """Build a recursive structure based on foreach-sources of the given rules.

    Every distinct ``(srcbase, dstbase)`` pair found in the rules' 'foreach'
    entries becomes an ``{'class': 'iteration', ...}`` node; a node is nested
    inside another node's ``'children'`` when its source base path extends
    that node's source base path (string-prefix relation).

    >>> rule1 = {
    ...     'foreach': [('a', 'x'), ('ab', 'xy')],
    ...     'source': ['1', 'a2', 'ab3', 'ab4'],
    ...     'destination': ['xy5']
    ... }
    >>> rule2 = {
    ...     'foreach': [('a', 'x'), ('ac', 'xz')],
    ...     'source': ['6'],
    ...     'destination': ['xz7']
    ... }
    >>> build_recursive_structure([rule1, rule2])  # doctest: +SKIP
    [{'class': 'iteration', 'srcbase': 'a', 'dstbase': 'x', 'children': [
        {'class': 'iteration', 'srcbase': 'ab', 'dstbase': 'xy', 'children': []},
        {'class': 'iteration', 'srcbase': 'ac', 'dstbase': 'xz', 'children': []}]}]

    :param rules: a set of rules to read @foreach attributes from
    :type rules: list
    :return: a list of (potentially nested) dictionaries
    :rtype: list
    """
    # gather all distinct (srcbase, dstbase) pairs, shortest srcbase first
    # so parents are inserted before their children
    bases = sorted({pair for rule in rules for pair in rule['foreach']},
                   key=lambda pair: pair[0])
    tree = []
    for src, dst in bases:
        level = tree
        placed = False
        while not placed:
            for node in level:
                if src.startswith(node['srcbase']):
                    # descend into the matching parent
                    level = node['children']
                    break
            else:
                # no parent at this level: insert a fresh iteration node here
                fresh = {
                    'class': 'iteration',
                    'srcbase': src,
                    'dstbase': dst,
                    'children': []
                }
                level.append(fresh)
                level = fresh['children']
                placed = True
    return tree
def classify_rules(rules: dict):
    """Classify rules. Represent rules as dictionary with associated metadata.

    Returns a data structure with is nicely structured to perform the @source
    and @destination algorithms with respect to @foreach semantics.

    :param rules: rule names associated to their implementation
    :type rules: dict(str: function)
    :return: a list of dictionaries containing rules with metadata;
             might be recursive (dicts contain lists of dicts)
    :rtype: [dict(), dict(), ...]
    """
    classified = []
    max_user_dorder = None
    # pass 1: plain (non-@foreach) rules become flat 'basicrule' entries;
    # track the highest user-supplied destination order seen
    each_found = False
    for rulename, rule in rules.items():
        if 'each' in rule.metadata:
            each_found = True
            continue
        # NOTE(review): this reads key 'order' while the @foreach branches
        # below read 'dorder' — one of the two spellings is probably a
        # typo; confirm against the decorator implementation before fixing.
        user_dorder = rule.metadata.get('dst', {}).get('order')
        if user_dorder is not None:
            user_dorder = int(user_dorder)
        if max_user_dorder is None or (user_dorder is not None and user_dorder > max_user_dorder):
            max_user_dorder = user_dorder
        classified.append({
            'name': rulename,
            'class': 'basicrule',
            'rule': rule,
            'each': [],
            'src': rule.metadata.get('src', []),
            'dst': rule.metadata.get('dst', {}).get('dests', []),
            'dorder': user_dorder
        })
    if max_user_dorder is None:
        max_user_dorder = 0
    # assign destination orders not defined by user
    for ruledata in classified:
        if ruledata['dorder'] is None:
            max_user_dorder += 1
            ruledata['dorder'] = max_user_dorder
    if not each_found:
        return classified
    # collect the set of all @foreach base sources
    foreach_rules = []
    for rulename, rule in rules.items():
        if 'each' not in rule.metadata:
            continue
        dorder = rule.metadata.get('dst', {}).get('dorder')
        if dorder is None:
            max_user_dorder += 1
            dorder = max_user_dorder
        foreach_rules.append({
            'foreach': rule.metadata.get('each', []),
            'source': rule.metadata.get('src', []),
            'destination': rule.metadata.get('dst', {}).get('dests', []),
            'dorder': dorder
        })
    # build recursive structure for @foreach entries
    # node tell when an ambiguous element has to be iterated
    recursive_structure = build_recursive_structure(foreach_rules)
    # annotate rules to it
    def traverse(tree, xpath):
        # Assumption. xpath exists as base in tree.
        # NOTE(review): `found` is never set, so if `xpath` matches no node
        # at some level this loop never terminates — correctness relies on
        # the assumption above holding for every @foreach chain.
        assert xpath
        current = tree
        found = False
        while not found:
            for element in current:
                if xpath == element['srcbase']:
                    return element['children']
                if xpath.startswith(element['srcbase']):
                    current = element['children']
    # add the rules to the recursive structure
    # NOTE(review): dorder is recomputed here, incrementing max_user_dorder
    # a second time for the same rules as in the foreach_rules loop above —
    # possibly intentional, but worth confirming.
    for rulename, rule in rules.items():
        if 'each' not in rule.metadata:
            continue
        dorder = rule.metadata.get('dst', {}).get('dorder')
        if dorder is None:
            max_user_dorder += 1
            dorder = max_user_dorder
        # a rule is attached at the children-list of its innermost @foreach base
        most_nested = rule.metadata['each'][-1]
        lst = traverse(recursive_structure, most_nested[0])
        lst.append({
            'class': 'foreach-rule',
            'name': rulename,
            'src': rule.metadata.get('src', []),
            'dst': rule.metadata.get('dst', {}).get('dests', []),
            'dorder': dorder,
            'rule': rule
        })
    for struct in recursive_structure:
        classified.append(struct)
    return classified
def reorder_rules(rules: dict):
| |
<reponame>MagicCameralife/BlueCleaner<gh_stars>0
# Copyright 2014 Swisscom, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import uuid
from collections import namedtuple
from django.core.exceptions import SuspiciousOperation, ObjectDoesNotExist
from django.db import models, router, transaction
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields.related import ForeignKey
from django.db.models.query import QuerySet, ModelIterable
from django.db.models.sql.datastructures import Join
from django.db.models.sql.query import Query
from django.db.models.sql.where import WhereNode
from django.utils import six
from django.utils.timezone import utc
from versions.exceptions import DeletionOfNonCurrentVersionError
from versions.settings import get_versioned_delete_collector_class, \
settings as versions_settings
from versions.util import get_utc_now
def get_utc_now():
    """Return the current time as a timezone-aware UTC datetime.

    NOTE(review): this local definition shadows the ``get_utc_now`` imported
    from ``versions.util`` above — confirm the duplicate is intentional.
    """
    # datetime.now(tz) replaces the deprecated utcnow().replace(tzinfo=...)
    # and yields the same aware UTC timestamp
    return datetime.datetime.now(datetime.timezone.utc)
def validate_uuid(uuid_obj):
    """Return True iff ``uuid_obj`` is a :class:`uuid.UUID` of version 4."""
    if not isinstance(uuid_obj, uuid.UUID):
        return False
    return uuid_obj.version == 4
# (time, active) pair pinning versioned queries to a point in time:
# ``time`` is the as-of timestamp, ``active`` whether the restriction applies
QueryTime = namedtuple('QueryTime', 'time active')


class ForeignKeyRequiresValueError(ValueError):
    """Raised when a ForeignKey requires a value but none was provided."""
    pass
class VersionManager(models.Manager):
"""
This is the Manager-class for any class that inherits from Versionable
"""
use_for_related_fields = True
def get_queryset(self):
    """
    Returns a VersionedQuerySet capable of handling version time
    restrictions.

    :return: VersionedQuerySet
    """
    qs = VersionedQuerySet(self.model, using=self._db)
    # propagate the query-time restriction from the instance this manager
    # hangs off of (present on related managers), so related lookups stay
    # pinned to the same point in time
    if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
        qs.querytime = self.instance._querytime
    return qs
def as_of(self, time=None):
    """
    Filters Versionables at a given time

    :param time: The timestamp (including timezone info) at which
        Versionables shall be retrieved
    :return: A QuerySet containing the base for a timestamped query.
    """
    # delegate to VersionedQuerySet.as_of on a fresh queryset
    return self.get_queryset().as_of(time)
def next_version(self, object, relations_as_of='end'):
    """
    Return the version that follows the given object.

    When the given object is already the current version (its
    ``version_end_date`` is None) it is returned itself — no database
    lookup is made to check whether a newer version was created by other
    code in the meantime.

    ``relations_as_of`` fixes the point in time for the returned version,
    which affects which related objects are returned by relation queries;
    see ``VersionManager.adjust_version_as_of`` for valid values.

    :param Versionable object: object whose next version will be returned.
    :param mixed relations_as_of: determines point in time used to access
        relations. 'start'|'end'|datetime|None
    :return: Versionable
    :raises ObjectDoesNotExist: when no successor version can be found
    """
    if object.version_end_date is None:
        successor = object
    else:
        successor = (
            self.filter(
                Q(identity=object.identity),
                Q(version_start_date__gte=object.version_end_date),
            )
            .order_by('version_start_date')
            .first()
        )
    if not successor:
        raise ObjectDoesNotExist(
            "next_version couldn't find a next version of object " +
            str(object.identity))
    return self.adjust_version_as_of(successor, relations_as_of)
def previous_version(self, object, relations_as_of='end'):
    """
    Return the version that precedes the given object.

    When the given object is the first version of its identity (its
    ``version_birth_date`` equals its ``version_start_date``) it is
    returned itself.

    ``relations_as_of`` fixes the point in time for the returned version,
    which affects which related objects are returned by relation queries;
    see ``VersionManager.adjust_version_as_of`` for valid values.

    :param Versionable object: object whose previous version will be
        returned.
    :param mixed relations_as_of: determines point in time used to access
        relations. 'start'|'end'|datetime|None
    :return: Versionable
    :raises ObjectDoesNotExist: when no predecessor version can be found
    """
    if object.version_birth_date == object.version_start_date:
        predecessor = object
    else:
        predecessor = (
            self.filter(
                Q(identity=object.identity),
                Q(version_end_date__lte=object.version_start_date),
            )
            .order_by('-version_end_date')
            .first()
        )
    if not predecessor:
        raise ObjectDoesNotExist(
            "previous_version couldn't find a previous version of "
            "object " + str(object.identity))
    return self.adjust_version_as_of(predecessor, relations_as_of)
def current_version(self, object, relations_as_of=None, check_db=False):
    """Return the current version of ``object``, or None if deleted.

    The current version is the one whose version_end_date is NULL; when
    no such row exists the object has been 'deleted'.

    When ``check_db`` is False and the passed object's version_end_date
    is None, the passed object is returned as-is without querying the
    database for a possibly newer row.

    :param Versionable object: object whose current version is wanted.
    :param mixed relations_as_of: point in time used to access
        relations. 'start'|'end'|datetime|None
    :param bool check_db: whether to look in the database for a more
        recent version.
    :return: Versionable
    """
    trust_passed_object = (object.version_end_date is None
                           and not check_db)
    if trust_passed_object:
        current = object
    else:
        current = self.current.filter(identity=object.identity).first()
    return self.adjust_version_as_of(current, relations_as_of)
@staticmethod
def adjust_version_as_of(version, relations_as_of):
"""
Adjusts the passed version's as_of time to an appropriate value, and
returns it.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations.
Valid ``relations_as_of`` values and how this affects the returned
version's as_of attribute:
- 'start': version start date
- 'end': version end date - 1 microsecond (no effect if version is
current version)
- datetime object: given datetime (raises ValueError if given datetime
not valid for version)
- None: unset (related object queries will not be restricted to a
point in time)
:param Versionable object: object whose as_of will be adjusted as
requested.
:param mixed relations_as_of: valid values are the strings 'start' or
'end', or a datetime object.
:return: Versionable
"""
if not version:
return version
if relations_as_of == 'end':
if version.is_current:
# Ensure that version._querytime is active, in case it wasn't
# before.
version.as_of = None
else:
version.as_of = version.version_end_date - datetime.timedelta(
microseconds=1)
elif relations_as_of == 'start':
version.as_of = version.version_start_date
elif isinstance(relations_as_of, datetime.datetime):
as_of = relations_as_of.astimezone(utc)
if not as_of >= version.version_start_date:
raise ValueError(
"Provided as_of '{}' is earlier than version's start "
"time '{}'".format(
as_of.isoformat(),
version.version_start_date.isoformat()
)
)
if version.version_end_date is not None \
and as_of >= version.version_end_date:
raise ValueError(
"Provided as_of '{}' is later than version's start "
"time '{}'".format(
as_of.isoformat(),
version.version_end_date.isoformat()
)
)
version.as_of = as_of
elif relations_as_of is None:
version._querytime = QueryTime(time=None, active=False)
else:
raise TypeError(
"as_of parameter must be 'start', 'end', None, or datetime "
"object")
return version
@property
def current(self):
    # Queryset restricted to current (not-yet-ended) versions only:
    # as_of(None) selects rows whose version_end_date is NULL.
    return self.as_of(None)
def create(self, **kwargs):
    """Create and return a new Versionable instance.

    :param kwargs: field values used to initialize the instance.
    :return: a Versionable instance of the class.
    """
    # No timestamp given: _create_at will stamp the instance with "now".
    return self._create_at(None, **kwargs)
def _create_at(self, timestamp=None, id=None, forced_identity=None,
**kwargs):
"""
WARNING: Only for internal use and testing.
Create a Versionable having a version_start_date and
version_birth_date set to some pre-defined timestamp
:param timestamp: point in time at which the instance has to be created
:param id: version 4 UUID unicode object. Usually this is not
specified, it will be automatically created.
:param forced_identity: version 4 UUID unicode object. For internal
use only.
:param kwargs: arguments needed for initializing the instance
:return: an instance of the class
"""
id = Versionable.uuid(id)
if forced_identity:
ident = Versionable.uuid(forced_identity)
else:
ident = id
if timestamp is None:
timestamp = get_utc_now()
kwargs['id'] = id
kwargs['identity'] = ident
kwargs['version_start_date'] = timestamp
kwargs['version_birth_date'] = timestamp
return super(VersionManager, self).create(**kwargs)
class VersionedWhereNode(WhereNode):
def as_sql(self, qn, connection):
    """Identify joined table aliases so VersionedExtraWhere.as_sql()
    can add time restrictions for those tables based on the
    VersionedQuery's querytime value.

    :param qn: In Django 1.7 & 1.8 this is a compiler
    :param connection: A DB connection
    :return: A tuple consisting of (sql_string, result_params)
    """
    from versions.fields import VersionedExtraWhere
    # self.children holds VersionedExtraWhere objects; only those that
    # have not been parameterized yet need handling.
    for child in self.children:
        if not isinstance(child, VersionedExtraWhere) or child.params:
            continue
        query = qn.query
        self._set_child_joined_alias(child, query.alias_map)
        if query.querytime.active:
            # Add query parameters that have not been added till now.
            child.set_as_of(query.querytime.time)
        else:
            # Remove the restriction when it is not required.
            child.sqls = []
    return super(VersionedWhereNode, self).as_sql(qn, connection)
@staticmethod
def _set_child_joined_alias(child, alias_map):
"""
Set the joined alias on the child, for Django >= 1.8.0
:param child:
:param alias_map:
"""
for table in alias_map:
join = alias_map[table]
if not | |
# VMware vCloud Director Python SDK
# Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyvcloud.vcd.client import ApiVersion
from pyvcloud.vcd.client import E
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import find_link
from pyvcloud.vcd.client import IpAddressMode
from pyvcloud.vcd.client import MetadataDomain
from pyvcloud.vcd.client import MetadataValueType
from pyvcloud.vcd.client import MetadataVisibility
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import QueryResultFormat
from pyvcloud.vcd.client import RelationType
from pyvcloud.vcd.client import ResourceType
from pyvcloud.vcd.client import VCLOUD_STATUS_MAP
from pyvcloud.vcd.client import VmNicProperties
from pyvcloud.vcd.exceptions import EntityNotFoundException
from pyvcloud.vcd.exceptions import InvalidParameterException
from pyvcloud.vcd.exceptions import InvalidStateException
from pyvcloud.vcd.exceptions import MultipleRecordsException
from pyvcloud.vcd.exceptions import OperationNotSupportedException
from pyvcloud.vcd.metadata import Metadata
from pyvcloud.vcd.utils import retrieve_compute_policy_id_from_href
from pyvcloud.vcd.utils import uri_to_api_uri
# API-version gates used by VM.update_compute_policy(): the legacy
# VdcComputePolicy element exists from API v32 up to v33, while the
# VmSizingPolicy element exists from v33 onwards.
VDC_COMPUTE_POLICY_MIN_API_VERSION = float(ApiVersion.VERSION_32.value)
VDC_COMPUTE_POLICY_MAX_API_VERSION = float(ApiVersion.VERSION_33.value)
VM_SIZING_POLICY_MIN_API_VERSION = float(ApiVersion.VERSION_33.value)
class VM(object):
"""A helper class to work with Virtual Machines."""
def __init__(self, client, href=None, resource=None):
    """Constructor for VM object.

    :param pyvcloud.vcd.client.Client client: the client that will be
        used to make REST calls to vCD.
    :param str href: href of the vm.
    :param lxml.objectify.ObjectifiedElement resource: object containing
        EntityType.VM XML data representing the vm.
    """
    self.client = client
    # At least one of href/resource must identify the vm.
    if href is None and resource is None:
        raise InvalidParameterException(
            'VM initialization failed as arguments are either invalid or'
            ' None')
    self.href = href
    self.resource = resource
    if resource is not None:
        # A given resource is authoritative for the href.
        self.href = resource.get('href')
def get_resource(self):
    """Return the EntityType.VM XML representation of the vm.

    Serves the cached copy when one is available; otherwise fetches it
    from vCD first.

    :return: object containing EntityType.VM XML data representing the
        vm.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    if self.resource is None:
        # First access: populate the cache from vCD.
        self.reload()
    return self.resource
def reload(self):
    """Re-fetch the resource representation of the vm from vCD.

    Should be called in between two method invocations on the VM
    object if the former call changed the representation of the vm in
    vCD.
    """
    fresh = self.client.get_resource(self.href)
    self.resource = fresh
    if fresh is not None:
        # Track any href change reported by the server.
        self.href = fresh.get('href')
def get_vc(self):
    """Return the name of the vCenter where this vm is located.

    :return: name of the vCenter server, or None when no matching
        VIRTUAL_MACHINE record is found.
    :rtype: str
    """
    self.get_resource()
    vim_info = self.resource.VCloudExtension[
        '{' + NSMAP['vmext'] + '}VmVimInfo']
    type_tag = '{' + NSMAP['vmext'] + '}VimObjectType'
    for record in vim_info.iterchildren():
        # Only records that carry a VimObjectType can match.
        if hasattr(record, type_tag) and \
                record.VimObjectType.text == 'VIRTUAL_MACHINE':
            return record.VimServerRef.get('name')
    return None
def get_cpus(self):
    """Return the CPU configuration of the vm.

    :return: number of cpus (int) and number of cores per socket (int)
        of the vm, under keys 'num_cpus' and 'num_cores_per_socket'.
    :rtype: dict
    """
    spec = self.get_resource().VmSpecSection
    return {
        'num_cpus': int(spec.NumCpus.text),
        'num_cores_per_socket': int(spec.NumCoresPerSocket.text),
    }
def get_memory(self):
    """Return the amount of memory configured for the vm.

    :return: amount of memory in MB.
    :rtype: int
    """
    mem = self.get_resource().VmSpecSection.MemoryResourceMb
    return int(mem.Configured.text)
def update_compute_policy(self, compute_policy_href, compute_policy_id=None):  # noqa: E501
    """Update the compute policy of this VM.

    For api v32 the VdcComputePolicy element is modified; for api v33
    and above, VmSizingPolicy is modified.

    :param str compute_policy_href: href of the target compute policy.
    :param str compute_policy_id: id of the target compute policy;
        redundant (derivable from the href) but kept for compatibility
        with older clients.
    :return: an object containing EntityType.TASK XML data which
        represents the asynchronous task that updates the vm.
    :rtype: lxml.objectify.ObjectifiedElement
    :raises OperationNotSupportedException: when the API version is too
        old for compute policies.
    """
    api_version = float(self.client.get_api_version())
    if api_version < VDC_COMPUTE_POLICY_MIN_API_VERSION:
        raise OperationNotSupportedException(
            f"Unsupported API version. Received '{api_version}' but "
            f"'{VDC_COMPUTE_POLICY_MIN_API_VERSION}' is required.")
    # Derive the id from the href when the caller did not supply one.
    compute_policy_id = compute_policy_id or \
        retrieve_compute_policy_id_from_href(compute_policy_href)
    vm_resource = self.get_resource()
    if api_version <= VDC_COMPUTE_POLICY_MAX_API_VERSION:
        # v32..v33: legacy VdcComputePolicy element.
        legacy = vm_resource.VdcComputePolicy
        legacy.set('href', compute_policy_href)
        legacy.set('id', compute_policy_id)
    if api_version >= VM_SIZING_POLICY_MIN_API_VERSION:
        # v33+: VmSizingPolicy element.
        sizing = vm_resource.ComputePolicy.VmSizingPolicy
        sizing.set('href', compute_policy_href)
        sizing.set('id', compute_policy_id)
    reconfigure_vm_link = find_link(self.resource,
                                    RelationType.RECONFIGURE_VM,
                                    EntityType.VM.value)
    return self.client.post_resource(reconfigure_vm_link.href,
                                     vm_resource,
                                     EntityType.VM.value)
def modify_cpu(self, virtual_quantity, cores_per_socket=None):
    """Update the number of CPUs of the vm.

    :param int virtual_quantity: number of virtual CPUs to configure on
        the vm.
    :param int cores_per_socket: number of cores per socket; defaults
        to ``virtual_quantity`` when omitted.
    :return: an object containing EntityType.TASK XML data which
        represents the asynchronous task that updates the vm.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    if cores_per_socket is None:
        cores_per_socket = virtual_quantity
    uri = self.href + '/virtualHardwareSection/cpu'
    item = self.client.get_resource(uri)
    rasd_ns = '{' + NSMAP['rasd'] + '}'
    item[rasd_ns + 'ElementName'] = \
        '%s virtual CPU(s)' % virtual_quantity
    item[rasd_ns + 'VirtualQuantity'] = virtual_quantity
    item['{' + NSMAP['vmw'] + '}CoresPerSocket'] = cores_per_socket
    return self.client.put_resource(uri, item, EntityType.RASD_ITEM.value)
def modify_memory(self, virtual_quantity):
    """Update the memory of the vm.

    :param int virtual_quantity: number of MB of memory to configure on
        the vm.
    :return: an object containing EntityType.TASK XML data which
        represents the asynchronous task that updates the vm.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    uri = self.href + '/virtualHardwareSection/memory'
    item = self.client.get_resource(uri)
    # BUG FIX: the ElementName used to read "%s virtual CPU(s)" -- a
    # copy-paste from modify_cpu(); the RASD item here describes memory.
    item['{' + NSMAP['rasd'] + '}ElementName'] = \
        '%s MB of memory' % virtual_quantity
    item['{' + NSMAP['rasd'] + '}VirtualQuantity'] = virtual_quantity
    return self.client.put_resource(uri, item, EntityType.RASD_ITEM.value)
def get_power_state(self, vm_resource=None):
    """Return the numeric status of the vm.

    The semantics of the returned value are captured in
    pyvcloud.vcd.client.VCLOUD_STATUS_MAP.

    :param lxml.objectify.ObjectifiedElement vm_resource: optional
        object containing EntityType.VM XML data; fetched from vCD when
        omitted.
    :return: the status of the vm.
    :rtype: int
    """
    resource = vm_resource if vm_resource is not None \
        else self.get_resource()
    return int(resource.get('status'))
def is_powered_on(self, vm_resource=None):
    """Report whether the vm is powered on.

    :param lxml.objectify.ObjectifiedElement vm_resource: optional
        object containing EntityType.VM XML data of the vm to inspect.
    :return: True if the vm is powered on else False.
    :rtype: bool
    """
    # Status code 4 == POWERED_ON (see VCLOUD_STATUS_MAP).
    state = self.get_power_state(vm_resource)
    return state == 4
def is_powered_off(self, vm_resource=None):
    """Report whether the vm is powered off.

    :param lxml.objectify.ObjectifiedElement vm_resource: optional
        object containing EntityType.VM XML data of the vm to inspect.
    :return: True if the vm is powered off else False.
    :rtype: bool
    """
    # Status code 8 == POWERED_OFF (see VCLOUD_STATUS_MAP).
    state = self.get_power_state(vm_resource)
    return state == 8
def is_suspended(self, vm_resource=None):
    """Report whether the vm is suspended.

    :param lxml.objectify.ObjectifiedElement vm_resource: optional
        object containing EntityType.VM XML data of the vm to inspect.
    :return: True if the vm is suspended else False.
    :rtype: bool
    """
    # Status code 3 == SUSPENDED (see VCLOUD_STATUS_MAP).
    state = self.get_power_state(vm_resource)
    return state == 3
def is_deployed(self, vm_resource=None):
    """Report whether the vm is deployed.

    :param lxml.objectify.ObjectifiedElement vm_resource: optional
        object containing EntityType.VM XML data of the vm to inspect.
    :return: True if the vm is deployed else False.
    :rtype: bool
    """
    # Status code 2 == DEPLOYED (see VCLOUD_STATUS_MAP).
    state = self.get_power_state(vm_resource)
    return state == 2
def _perform_power_operation(self,
                             rel,
                             operation_name,
                             media_type=None,
                             contents=None):
    """Perform a power operation on the vm.

    One of: power on, power off, deploy, undeploy, shutdown, reboot,
    power reset.

    :param pyvcloud.vcd.client.RelationType rel: relation of the link
        in the vm resource that will be triggered.
    :param str operation_name: name of the power operation, used in
        error messages.
    :param str media_type: media type of the link that will be
        triggered.
    :param lxml.objectify.ObjectifiedElement contents: payload for the
        linked operation.
    :return: an object containing EntityType.TASK XML data which
        represents the asynchronous task tracking the power operation.
    :rtype: lxml.objectify.ObjectifiedElement
    :raises OperationNotSupportedException: if the power operation
        can't be performed in the vm's current state.
    """
    resource = self.get_resource()
    try:
        return self.client.post_linked_resource(resource, rel,
                                                media_type, contents)
    except OperationNotSupportedException:
        # Re-raise with a message that names the operation and the
        # state that made it impossible.
        state = self.get_power_state(resource)
        raise OperationNotSupportedException(
            'Can\'t {0} vm. Current state of vm: {1}.'.format(
                operation_name, VCLOUD_STATUS_MAP[state]))
def shutdown(self):
    """Shut down the guest OS of the vm.

    :return: an object containing EntityType.TASK XML data for the
        asynchronous shutdown task.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    return self._perform_power_operation(
        operation_name='shutdown', rel=RelationType.POWER_SHUTDOWN)
def reboot(self):
    """Reboot the vm.

    :return: an object containing EntityType.TASK XML data for the
        asynchronous reboot task.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    return self._perform_power_operation(
        operation_name='reboot', rel=RelationType.POWER_REBOOT)
def power_on(self):
    """Power on the vm.

    :return: an object containing EntityType.TASK XML data for the
        asynchronous power-on task.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    return self._perform_power_operation(
        operation_name='power on', rel=RelationType.POWER_ON)
def power_off(self):
    """Power off the vm.

    :return: an object containing EntityType.TASK XML data for the
        asynchronous power-off task.
    :rtype: lxml.objectify.ObjectifiedElement
    """
    return self._perform_power_operation(
        operation_name='power off', rel=RelationType.POWER_OFF)
def power_reset(self):
"""Powers reset the vm.
:return: an object containing EntityType.TASK XML data which represents
| |
"50141",
"50149",
"50150",
"50156",
"50162",
"50163",
"50164",
"50169",
"50191",
"50194",
"50204",
"50216",
"50236",
"50238",
"50244",
"50245",
"50254",
"50262",
"50268",
"50275",
"50296",
"50297",
"50302",
"50307",
"50309",
"50311",
"50314",
"50317",
"50319",
"50320",
"50325",
"50330",
"50336",
"50344",
"50348",
"50369",
"50382",
"50386",
"50399",
"50408",
"50411",
"50414",
"50418",
"50419",
"50428",
"50445",
"50451",
"50468",
"50485",
"50492",
"50499",
"50514",
"50519",
"50525",
"50526",
"50534",
"50540",
"50573",
"50580",
"50587",
"50589",
"50613",
"50618",
"50631",
"50634",
"50635",
"50636",
"50663",
"50679",
"50702",
"50725",
"50728",
"50732",
"50744",
"50757",
"50765",
"50766",
"50817",
"50844",
"50851",
"50852",
"50854",
"50870",
"50894",
"50899",
"50913",
"50919",
"50945",
"50965",
"50967",
"50974",
"50982",
"51075",
"51083",
"51087",
"51104",
"51135",
"51140",
"51143",
"51147",
"51150",
"51160",
"51176",
"51178",
"51184",
"51185",
"51187",
"51201",
"51204",
"51206",
"51209",
"51216",
"51218",
"51219",
"51229",
"51233",
"51254",
"51255",
"51278",
"51286",
"51289",
"51320",
"51327",
"51338",
"51345",
"51355",
"51364",
"51379",
"51387",
"51398",
"51400",
"51403",
"51409",
"51421",
"51423",
"51425",
"51479",
"51513",
"51521",
"51525",
"51529",
"51545",
"51549",
"51581",
"51590",
"51606",
"51620",
"51621",
"51639",
"51649",
"51651",
"51654",
"51669",
"51675",
"51686",
"51692",
"51711",
"51715",
"51721",
"51732",
"51733",
"51749",
"51757",
"51764",
"51771",
"51777",
"51781",
"51782",
"51795",
"51797",
"51802",
"51818",
"51821",
"51824",
"51830",
"51838",
"51844",
"51851",
"51852",
"51854",
"51869",
"51883",
"51900",
"51904",
"51933",
"51934",
"51944",
"51952",
"51985",
"52005",
"52022",
"52024",
"52033",
"52044",
"52046",
"52082",
"52095",
"52105",
"52106",
"52123",
"52124",
"52139",
"52145",
"52150",
"52160",
"52185",
"52193",
"52205",
"52210",
"52219",
"52224",
"52230",
"52244",
"52255",
"52276",
"52281",
"52284",
"52291",
"52293",
"52297",
"52324",
"52352",
"52366",
"52389",
"52400",
"52403",
"52444",
"52455",
"52459",
"52469",
"52477",
"52503",
"52522",
"52532",
"52539",
"52562",
"52565",
"52621",
"52629",
"52645",
"52648",
"52668",
"52686",
"52698",
"52704",
"52711",
"52724",
"52729",
"52731",
"52735",
"52738",
"52758",
"52808",
"52812",
"52814",
"52819",
"52834",
"52884",
"52898",
"52917",
"52924",
"52935",
"53003",
"53027",
"53054",
"53075",
"53094",
"53100",
"53109",
"53175",
"53188",
"53191",
"53199",
"53235",
"53252",
"53287",
"53289",
"53292",
"53298",
"53301",
"53308",
"53318",
"53342",
"53360",
"53368",
"53372",
"53373",
"53394",
"53405",
"53425",
"53445",
"53447",
"53451",
"53481",
"53511",
"53521",
"53524",
"53530",
"53540",
"53592",
"53595",
"53604",
"53607",
"53617",
"53622",
"53628",
"53629",
"53647",
"53664",
"53683",
"53704",
"53764",
"53788",
"53790",
"53804",
"53811",
"53824",
"53830",
"53834",
"53845",
"53868",
"53879",
"53918",
"53941",
"53949",
"53951",
"53957",
"53958",
"53959",
"53971",
"53984",
"53989",
"54027",
"54029",
"54039",
"54045",
"54062",
"54072",
"54074",
"54088",
"54090",
"54092",
"54098",
"54104",
"54106",
"54107",
"54120",
"54136",
"54145",
"54146",
"54153",
"54167",
"54169",
"54174",
"54188",
"54190",
"54212",
"54219",
"54240",
"54244",
"54253",
"54254",
"54255",
"54267",
"54273",
"54286",
"54289",
"54312",
"54324",
"54338",
"54355",
"54356",
"54360",
"54369",
"54372",
"54387",
"54408",
"54422",
"54429",
"54435",
"54462",
"54464",
"54466",
"54484",
"54489",
"54525",
"54536",
"54546",
"54554",
"54561",
"54569",
"54592",
"54593",
"54597",
"54602",
"54614",
"54629",
"54642",
"54683",
"54686",
"54689",
"54694",
"54742",
"54744",
"54748",
"54749",
"54752",
"54760",
"54769",
"54820",
"54843",
"54851",
"54854",
"54919",
"54925",
"54966",
"54975",
"54980",
"54981",
"54993",
"55018",
"55038",
"55079",
"55085",
"55097",
"55114",
"55115",
"55119",
"55124",
"55143",
"55167",
"55183",
"55236",
"55247",
"55258",
"55272",
"55274",
"55281",
"55290",
"55294",
"55319",
"55325",
"55350",
"55354",
"55386",
"55391",
"55392",
"55402",
"55413",
"55420",
"55423",
"55436",
"55442",
"55458",
"55460",
"55464",
"55465",
"55469",
"55491",
"55495",
"55501",
"55508",
"55521",
"55532",
"55547",
"55553",
"55566",
"55576",
"55580",
"55591",
"55605",
"55615",
"55621",
"55623",
"55625",
"55642",
"55669",
"55676",
"55681",
"55695",
"55709",
"55717",
"55743",
"55752",
"55781",
"55798",
"55799",
"55802",
"55804",
"55817",
"55819",
"55843",
"55858",
"55867",
"55888",
"55894",
"55903",
"55922",
"55933",
"55945",
"55957",
"55965",
"55966",
"55975",
"55980",
"55985",
"55987",
"55990",
"56000",
"56033",
"56045",
"56053",
"56060",
"56074",
"56075",
"56088",
"56097",
"56098",
"56115",
"56134",
"56144",
"56156",
"56192",
"56196",
"56235",
"56237",
"56239",
"56240",
"56245",
"56261",
"56277",
"56282",
"56284",
"56291",
"56294",
"56340",
"56343",
"56364",
"56386",
"56389",
"56397",
"56431",
"56454",
"56457",
"56475",
"56497",
"56510",
"56517",
"56534",
"56539",
"56549",
"56550",
"56552",
"56557",
"56575",
"56594",
"56607",
"56614",
"56622",
"56653",
"56668",
"56693",
"56696",
"56697",
"56699",
"56702",
"56704",
"56708",
"56719",
"56723",
"56747",
"56787",
"56788",
"56796",
"56798",
"56820",
"56830",
"56853",
"56867",
"56871",
"56888",
"56893",
"56896",
"56909",
"56913",
"56933",
"56938",
"56944",
"56955",
"56971",
"56984",
"56992",
"56995",
"56997",
"57018",
"57021",
"57022",
"57023",
"57042",
"57057",
"57066",
"57113",
"57137",
"57149",
"57172",
"57175",
"57207",
"57237",
"57253",
"57272",
"57280",
"57292",
"57317",
"57319",
"57322",
"57333",
"57334",
"57354",
"57377",
"57389",
"57395",
"57397",
"57411",
"57422",
"57441",
"57529",
"57534",
"57537",
"57538",
"57564",
"57570",
"57595",
"57609",
"57644",
"57646",
"57658",
"57671",
"57682",
"57683",
"57687",
"57702",
"57713",
"57721",
"57743",
"57745",
"57746",
"57748",
"57751",
"57752",
"57759",
"57783",
"57820",
"57835",
"57838",
"57841",
"57845",
"57880",
"57896",
"57903",
"57932",
"57934",
"57936",
"57940",
"57941",
"57951",
"57991",
"57995",
"57999",
"58001",
"58003",
"58014",
"58041",
"58048",
"58054",
"58057",
"58059",
"58075",
"58087",
"58095",
"58109",
"58119",
"58124",
"58137",
"58140",
"58163",
"58164",
"58165",
"58167",
"58205",
"58209",
"58222",
"58223",
"58249",
"58252",
"58257",
"58274",
"58280",
"58283",
"58336",
"58337",
"58339",
"58342",
"58354",
"58370",
"58371",
"58375",
"58389",
"58391",
"58423",
"58430",
"58434",
"58466",
"58482",
"58496",
"58522",
"58525",
"58529",
"58548",
"58556",
"58567",
"58578",
"58579",
"58582",
"58587",
"58589",
"58593",
"58595",
"58605",
"58617",
"58637",
"58654",
"58721",
"58722",
"58754",
"58767",
"58773",
"58784",
"58785",
"58811",
"58828",
"58862",
"58877",
"58883",
"58899",
"58903",
"58909",
"58924",
"58930",
"58935",
"58948",
"58971",
"58988",
"58994",
"59003",
"59037",
"59044",
"59089",
"59094",
"59101",
"59105",
"59120",
"59145",
"59162",
"59165",
"59188",
"59193",
"59230",
"59231",
"59236",
"59237",
"59252",
"59275",
"59303",
"59305",
"59306",
"59309",
"59310",
"59318",
"59343",
"59351",
"59352",
"59376",
"59387",
"59391",
"59398",
"59431",
"59452",
"59454",
"59471",
"59515",
"59530",
"59532",
"59534",
"59550",
"59553",
"59566",
"59568",
"59583",
"59584",
"59611",
"59614",
"59634",
"59642",
"59651",
"59672",
"59681",
"59698",
"59726",
"59737",
"59740",
"59750",
"59787",
"59793",
"59818",
"59840",
"59843",
"59858",
"59865",
"59920",
"59938",
"59959",
"59966",
"59971",
"59981",
"60001",
"60008",
"60026",
"60029",
"60049",
"60059",
"60068",
"60069",
"60116",
"60125",
"60141",
"60149",
"60150",
"60156",
"60162",
"60163",
"60164",
"60169",
"60191",
"60194",
"60216",
"60224",
"60236",
"60238",
"60244",
"60245",
"60254",
"60262",
"60268",
"60275",
"60296",
"60297",
"60302",
"60309",
"60311",
"60314",
"60317",
"60319",
"60323",
"60325",
"60330",
"60336",
"60344",
"60348",
"60369",
"60381",
"60382",
"60383",
"60399",
"60407",
"60408",
"60411",
"60418",
"60419",
"60428",
"60445",
"60451",
"60468",
"60481",
"60485",
"60492",
"60499",
"60514",
"60519",
"60522",
"60525",
"60526",
"60534",
"60540",
"60573",
"60580",
"60586",
"60587",
"60589",
"60618",
"60631",
"60634",
"60635",
"60636",
"60647",
"60662",
"60663",
"60679",
"60681",
"60689",
"60702",
"60725",
"60728",
"60732",
"60744",
"60757",
"60765",
"60766",
"60817",
"60851",
"60852",
"60854",
"60858",
"60870",
"60875",
"60894",
"60899",
"60913",
"60918",
"60919",
"60945",
"60965",
"60982",
"60986",
"61075",
"61083",
"61087",
"61103",
"61104",
"61135",
"61140",
"61143",
"61147",
"61150",
"61160",
"61176",
"61178",
"61184",
"61185",
"61187",
"61201",
"61204",
"61206",
"61209",
"61216",
"61218",
"61219",
"61229",
"61233",
"61254",
"61255",
"61278",
"61286",
"61289",
"61320",
"61327",
"61338",
"61345",
"61355",
"61364",
"61373",
"61379",
"61383",
"61387",
"61398",
"61400",
"61403",
"61409",
"61421",
"61423",
"61425",
"61453",
"61479",
"61495",
"61521",
"61523",
"61525",
"61529",
"61545",
"61549",
"61564",
"61581",
"61590",
"61606",
"61620",
"61621",
"61632",
"61639",
"61649",
"61651",
"61654",
"61666",
"61669",
"61675",
"61685",
"61686",
"61692",
"61711",
"61715",
"61732",
"61733",
"61749",
"61757",
"61764",
"61767",
"61771",
"61777",
"61781",
"61782",
"61795",
"61797",
"61818",
"61821",
"61824",
"61830",
"61838",
"61844",
"61852",
"61854",
"61869",
"61877",
"61900",
"61904",
"61933",
"61934",
"61944",
"61952",
"62005",
"62022",
"62024",
"62033",
"62037",
"62044",
"62046",
"62063",
"62082",
"62095",
"62105",
"62106",
"62123",
"62139",
"62145",
"62150",
"62160",
"62185",
"62193",
"62205",
"62210",
"62219",
"62224",
"62230",
"62244",
"62255",
"62258",
"62276",
"62281",
"62284",
"62291",
"62293",
"62297",
"62324",
"62366",
"62389",
"62403",
"62444",
"62455",
"62459",
"62469",
"62477",
"62503",
"62522",
"62532",
"62539",
"62562",
"62565",
"62621",
"62629",
"62633",
"62645",
"62668",
"62686",
"62698",
"62711",
"62724",
"62729",
"62731",
"62735",
"62738",
"62758",
"62807",
"62808",
"62812",
"62814",
"62819",
"62834",
"62884",
"62891",
"62894",
"62898",
"62917",
"62924",
"62935",
"62952",
"63003",
"63027",
"63054",
"63075",
"63100",
"63109",
"63175",
"63188",
"63191",
"63199",
"63235",
"63289",
"63292",
"63298",
"63301",
"63318",
"63342",
"63360",
"63372",
"63373",
"63394",
"63396",
"63405",
"63425",
"63445",
"63447",
"63451",
"63481",
"63511",
"63521",
"63524",
"63540",
"63592",
"63595",
"63604",
"63607",
"63617",
"63622",
"63628",
"63629",
"63647",
"63664",
"63683",
"63764",
"63788",
"63790",
"63804",
"63824",
"63830",
"63845",
"63868",
"63879",
"63883",
"63918",
"63941",
"63949",
"63951",
"63957",
"63958",
"63959",
"63984",
"64004",
"64027",
"64029",
"64039",
"64062",
"64072",
"64074",
"64088",
"64090",
"64092",
"64098",
"64104",
"64107",
"64120",
"64136",
"64137",
"64145",
"64146",
"64153",
"64167",
"64169",
"64174",
"64188",
"64190",
"64212",
"64219",
"64240",
"64244",
"64253",
"64254",
"64255",
"64267",
"64273",
"64286",
"64289",
"64312",
"64324",
"64338",
"64355",
"64356",
"64360",
"64369",
"64372",
"64408",
"64422",
"64429",
"64435",
"64462",
"64464",
"64466",
"64477",
"64484",
"64489",
"64525",
"64536",
"64546",
"64554",
"64561",
"64569",
"64592",
"64593",
"64597",
"64602",
"64614",
"64615",
"64629",
"64642",
"64686",
"64689",
"64694",
"64742",
"64744",
"64748",
"64749",
"64752",
"64760",
"64820",
"64843",
"64851",
"64854",
"64919",
"64925",
"64975",
"64980",
"64993",
"65010",
"65018",
"65038",
"65079",
"65085",
"65097",
"65114",
"65115",
"65119",
"65124",
"65167",
"65236",
"65241",
"65247",
"65258",
"65271",
"65272",
"65274",
"65281",
"65294",
"65319",
"65325",
| |
# -*- coding: utf-8 -*-
#
# "Tic Tac Toe" Game
#
# Created by LulzLoL231 on 2/24/20.
#
from random import choice
from os import system
from platform import system as OperationSystem
from time import sleep
class Vars(object):
    '''Static text and grid constants for the Tic Tac Toe game.'''
    # NOTE: every string below is printed to the player at runtime --
    # they must stay byte-identical.
    game_rules = '-= RULES =-\n1. The game is played on a grid that\'s 3 squares by 3 squares.\n2. You choice your sign (X or O), and put them in empty squares.\n3. The first player to get 3 of her signs in a row (up, down, across, or diagonally) is the winner.\n4. When all 9 squares are full, the game is over. If no player has 3 signs in a row, the game ends in a tie.'
    game_help = 'Use numbers (from 1 to 9) to put your signs.\nUse "exit" for stop game & take draw.\n"rules" for Rules & "help" for Help.\nGood luck & Have fun! ^^'
    game_welcome = 'Welcome to "Tic Tac Toe" game!'
    game_pc_move = 'Now PC move!'
    game_player_move = 'Now is YOUR move!'
    # 3x3 board template; the nine {} slots are filled from the grid
    # dict (keys 1..9) by GameSession.GetGrid().
    game_grid_print_template = '''
     |     |
  {}  |  {}  |  {}
     |     |
-----------
     |     |
  {}  |  {}  |  {}
     |     |
-----------
     |     |
  {}  |  {}  |  {}
     |     |\n'''
    # Fresh board with digit placeholders (cells show their own number).
    game_grid_template = {1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9'}
    # Fresh board with blank cells (digits disabled).
    game_grid_template_wo_digit = {1: ' ', 2: ' ', 3: ' ', 4: ' ', 5: ' ', 6: ' ', 7: ' ', 8: ' ', 9: ' '}
    game_player_sign_is = 'Your sign is "{}"'
class Utils(object):
    '''Utility helpers for the Tic Tac Toe game.'''

    # FIX: the methods were plain functions without ``self`` — calling
    # them through an instance (Utils().GetRandomSign()) raised
    # TypeError. @staticmethod keeps class-level calls working and makes
    # instance-level calls valid too.
    @staticmethod
    def GetRandomSign():
        '''Return a random sign ("X" or "O") for the game.'''
        return choice(['X', 'O'])

    @staticmethod
    def ClearConsole():
        '''Clear the console: "clear" on Unix-like systems, "cls" on
        Windows; do nothing when the OS is not recognized.'''
        os_name = OperationSystem()  # query the platform only once
        if os_name in ('Darwin', 'Linux'):
            system('clear')
        elif os_name == 'Windows':
            system('cls')
class GameSession(object):
'''Game Session class for Tic Tac Toe game.\n'''
def __init__(self):
    '''Start a fresh session: zeroed scores, no signs, digit grid.'''
    self.play = False          # True while a round is running
    self.pc_score = 0
    self.player_score = 0
    self.draws = 0
    self.pc_sign = None        # assigned later via InstallSigns()
    self.player_sign = None
    self.last_move = None      # 'pc' | 'player', set by InstallWhoNextMove()
    self.digit_grid = True     # grid cells currently show their numbers
    self.grid = Vars.game_grid_template.copy()
def DisableNumbersInGrid(self):
    '''Blank out every cell that still shows its digit placeholder.'''
    template = Vars.game_grid_template
    for key, cell in self.grid.items():
        # Cells already holding a player's sign are left untouched.
        if cell == template[key]:
            self.grid[key] = ' '
    self.digit_grid = False
    return True
def EnableNumbersInGrid(self):
    '''Restore digit placeholders in every blank cell of the grid.'''
    template = Vars.game_grid_template
    for key, cell in self.grid.items():
        # Cells already holding a player's sign are left untouched.
        if cell == ' ':
            self.grid[key] = template[key]
    self.digit_grid = True
    return True
def GetAvailableGridPlaces(self):
    '''Return the free grid places (cells not holding a sign) as a list.

    FIX: the accumulator used to be named ``list``, shadowing the
    builtin; rewritten as a comprehension.
    '''
    return [key for key in self.grid if self.grid[key] not in ('X', 'O')]
def PcRandomMove(self):
    '''Return a random free grid place for the PC's move.

    Raises when no pc sign is installed or the game is not running.
    '''
    if not self.pc_sign:
        raise Exception('"pc_sign" is not installed!')
    if not self.InPlay():
        raise Exception('Game is not in "play" state!')
    return choice(self.GetAvailableGridPlaces())
def InstallSigns(self, player_sign, pc_sign):
    '''Remember both players' signs; always returns True.'''
    self.pc_sign = pc_sign
    self.player_sign = player_sign
    return True
def InstallWhoNextMove(self):
'''Randomly choice who make next move.'''
self.last_move = choice(['pc', 'player'])
return True
def InPlay(self):
'''Return "play" state'''
return self.play
def IsPlayerMove(self):
'''Return True if now is Player move.'''
if self.last_move == 'player':
return False
else:
return True
def GetGrid(self):
'''Return grid in template, ready for pretty print.'''
return Vars.game_grid_print_template.format(self.grid[1], self.grid[2], self.grid[3], self.grid[4], self.grid[5], self.grid[6], self.grid[7], self.grid[8], self.grid[9])
def GetScore(self):
'''Return current game score as string.'''
return f'{str(self.player_score)}:{str(self.pc_score)}:{str(self.draws)}'
def MakeMove(self, grid_place, sign):
'''Make move in game.'''
if str(grid_place).isdigit():
if int(grid_place) in range(1, 10):
if sign.upper() in ['X', 'O']:
if self.InPlay():
self.grid[int(grid_place)] = sign
return True
raise Exception('Game is not in "play" state!')
raise Exception('"sign" is not in signs list!')
raise Exception('"grid_place" is not in range!')
raise Exception('"grid_place" must be digit!')
def ResetGrid(self):
'''Reset grid to default state.'''
if self.digit_grid:
self.grid = Vars.game_grid_template.copy()
else:
self.grid = Vars.game_grid_template_wo_digit.copy()
return True
def GridIsFull(self):
'''Return True if grid has not have free space.'''
if self.GetAvailableGridPlaces() == []:
return True
return False
def WhoWin(self):
'''Return Players name if one of them won, "draw" if nobody win, but grid don\'t have free space, and None if grid is have free space.'''
for sign in ['X', 'O']:
stat = ((self.grid[1] == sign and self.grid[2] == sign and self.grid[3] == sign) or (self.grid[4] == sign and self.grid[5] == sign and self.grid[6] == sign) or (self.grid[7] == sign and self.grid[8] == sign and self.grid[9] == sign) or (self.grid[1] == sign and self.grid[4] == sign and self.grid[7] == sign) or (self.grid[2] == sign and self.grid[5] == sign and self.grid[8] == sign) or (self.grid[3] == sign and self.grid[6] == sign and self.grid[9] == sign) or (self.grid[1] == sign and self.grid[5] == sign and self.grid[9] == sign) or (self.grid[3] == sign and self.grid[5] == sign and self.grid[7] == sign))
if stat is True:
if self.pc_sign == sign:
return 'pc'
elif self.player_sign == sign:
return 'player'
else:
raise Exception('PC or Player sign is not installed!')
else:
continue
if self.GridIsFull():
return 'draw'
return None
def WhoWinByScore(self):
'''Return Players name if one of them won, "draw" if nobody win, and None if PC and Player have 0 score, and 0 draws.'''
if (((self.pc_score == 0) and (self.player_score == 0)) and (self.draws == 0)):
return None
else:
if self.pc_score > self.player_score:
return 'pc'
elif self.pc_score < self.player_score:
return 'player'
else:
return 'draw'
def ResetMoves(self):
'''Set last_move to None.'''
self.last_move = None
return True
def Game():
    '''Main function in Tic Tac Toe game.\n

    Runs the interactive console loop: asks the player for a sign, then
    alternates player/PC moves until the player exits via the "exit" command
    (or Ctrl-C), finally printing the overall score summary.
    '''
    try:
        print()
        print(Vars.game_welcome)
        print()
        session = GameSession()
        # Sign selection: honor a valid X/O choice, otherwise pick randomly.
        player_sign = str(input('[?] Enter what a sign you want (X or O): '))
        if player_sign.upper() in ['X', 'O']:
            if player_sign.upper() == 'X':
                pc_sign = 'O'
            else:
                pc_sign = 'X'
        else:
            print('[!] Wrong choice! Using random for choice...')
            player_sign = Utils.GetRandomSign()
            if player_sign.upper() == 'X':
                pc_sign = 'O'
            else:
                pc_sign = 'X'
        session.InstallSigns(player_sign, pc_sign)
        session.InstallWhoNextMove()
        session.play = True
        print(f'Player sign is "{player_sign}"')
        sleep(2)
        # Main loop: one iteration = one redraw plus one move (or one command).
        while session.play:
            Utils.ClearConsole()
            print('\n-= SCORE =-')
            print(' ' + session.GetScore())
            print(session.GetGrid())
            win = session.WhoWin()
            if win:
                # Round finished: update the score and start a fresh round.
                if win == 'player':
                    print('\nPlayer win!')
                    session.player_score += 1
                elif win == 'pc':
                    print('\n PC win!')
                    session.pc_score += 1
                else:
                    print('\nIt\'s a draw!')
                    session.draws += 1
                session.ResetGrid()
                session.ResetMoves()
                session.InstallWhoNextMove()
                sleep(2)
                continue
            else:
                if session.IsPlayerMove():
                    print(Vars.game_player_move)
                    print()
                    print(Vars.game_help)
                    print()
                    cmd = str(input('>>> '))
                    # A digit is a move; anything else is a game command.
                    if cmd.isdigit():
                        if int(cmd) in range(1, 10):
                            if int(cmd) in session.GetAvailableGridPlaces():
                                session.MakeMove(cmd, session.player_sign)
                                session.last_move = 'player'
                                continue
                            else:
                                print('\nThis place is engaged! Please choice other.')
                                sleep(2)
                                continue
                        else:
                            print('\n[!] Please use numbers in grid for making move!')
                            sleep(2)
                            continue
                    else:
                        if cmd == 'help':
                            print(Vars.game_help)
                            print()
                            input('Press ENTER button for continue.')
                            continue
                        elif cmd == '':
                            print(Vars.game_help)
                            print()
                            sleep(2)
                            continue
                        elif cmd == 'digit':
                            # Toggle between digit placeholders and blank cells.
                            if session.digit_grid is True:
                                print('Disabling numbers in grid...')
                                session.DisableNumbersInGrid()
                            else:
                                print('Enabling numbers in grid...')
                                session.EnableNumbersInGrid()
                            sleep(2)
                            continue
                        elif cmd == 'rules':
                            print(Vars.game_rules)
                            print()
                            input('Press ENTER button for continue.')
                            continue
                        elif cmd == 'exit':
                            q = str(input('[?] You are sure (Yes/No): ')).lower()
                            if q.startswith('y'):
                                print('Okay!')
                                sleep(1)
                                session.play = False
                                break
                            else:
                                print('Continue!')
                                sleep(1)
                                continue
                        elif cmd == 'cmd':
                            print('\n-= CMDs =-\n"help" – Print help.\n"rules" – Print rules.\n"exit" – For exit from game and take draw.\n"digit" – For disable print numbers in grid.\n"debug" – Print debug info.\n"cmd" – Print this message.\n')
                            input('Press ENTER button for continue.')
                            continue
                        elif cmd == 'debug':
                            print('\n-= DEBUG =-')
                            print(f'\nplay: {str(session.play)}\nplayer_sign: {session.player_sign}\npc_sign: {session.pc_sign}\nlast_move: {str(session.last_move)}\nscore: {session.GetScore()}\ngrid: {str(session.grid)}\ndigit_grid: {str(session.digit_grid)}\n')
                            input('Press ENTER button for continue.')
                            continue
                        else:
                            print('\n[!] Unrecognized command!')
                            sleep(2)
                            continue
                else:
                    # PC turn: random move among the remaining free cells.
                    print(Vars.game_pc_move)
                    session.MakeMove(session.PcRandomMove(), session.pc_sign)
                    session.last_move = 'pc'
                    sleep(2)
                    continue
        # Loop left: show the final score summary.
        Utils.ClearConsole()
        print('-= SCORE =-')
        print(' ' + session.GetScore())
        win = session.WhoWinByScore()
        if win:
            if win == 'player':
                print(f'\nYou did it! You won PC with scope: {str(session.player_score)}:{str(session.pc_score)}. Draws: {str(session.draws)}')
            elif win == 'pc':
                print(f'\nSo sad! You lose to PC with scope: {str(session.pc_score)}:{str(session.player_score)}. Draws: {str(session.draws)}')
            else:
                print(f'It\'s a draw... Score: {session.GetScore()}')
        else:
            print('You don\'t play!')
        print('It was be a nice game, bye & have a nice day! ^^')
        if __name__ == '__main__':
            exit()
    except KeyboardInterrupt:
        print('\n[!] Emergency exit!')
        sleep(1)
if __name__ | |
"""
interfaces.py
`FormsiteInterface` `FormsiteParams` and `FormsiteCredentials` classes are defined here.
Author: <NAME>
Documentation: https://github.com/strny0/formsite-utility
"""
from __future__ import annotations
import csv
import asyncio
from datetime import datetime as dt
from datetime import timedelta as td
from pathlib import Path
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Optional, Set, Union, Tuple, List
import re
import os
import pandas as pd
from pytz import UnknownTimeZoneError, timezone as pytztimezone
import requests
from .downloader import _FormsiteDownloader
from .processing import _FormsiteProcessing
from .api import _FormsiteAPI
from .auth import FormsiteCredentials
from tqdm import tqdm
__version__ = "1.3.20"
__author__ = "<EMAIL>"
def _shift_param_date(date: Union[str, dt], timezone_offset: td) -> str:
"""Shifts input date in the string format/datetime by timedelta in timezone offset.
The offset is additive, date + timezone_offset.
Args:
date (Union[str, dt]): String in formats: 'yyyy-mm-dd', 'yyyy-mm-dd HH:MM:SS' or 'yyyy-mm-ddTHH:MM:SSZ' or datetime.
timezone_offset (td): A timedelta value representing addition to 'date'.
Notes:
If you input time in ISO 8601 UTC, timezone offset won't be applied.
Raises:
ValueError: Raised if string format is not recognized.
Returns:
str: A datetime string in 'yyyy-mm-ddTHH:MM:SSZ' format, shifted by timezone_offset amount.
"""
if isinstance(date, dt):
date = date + timezone_offset
else:
formats = ["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d", "%Y-%m-%d %H:%M:%S"]
for f in formats:
try:
date = dt.strptime(date, f)
date = date + timezone_offset
break
except ValueError:
continue
if not isinstance(date, dt):
raise ValueError("""invalid date format input for afterdate/beforedate, please use a datetime object or string in ISO 8601, yyyy-mm-dd or yyyy-mm-dd HH:MM:SS format""")
return dt.strftime(date, "%Y-%m-%dT%H:%M:%SZ")
def _extract_timezone_from_str(timezone: str, timezone_re: str) -> td:
"""Parses input timezone.
Args:
timezone (str): string in format ['+0200', '02:00', +02:00', '16:48', '-05:00', '-0600']
timezone_re (str): regex encompassing above mentioned valid formats
Returns:
timedelta: timedelta offset
"""
tz_offset = None
if re.search(timezone_re, timezone) is not None:
tz_str = timezone.replace(r"\'", "").replace(r"\"", "")
if ":" in tz_str:
tz_tuple = tz_str.split(":", 1)
else:
t = len(tz_str) - 2
tz_tuple = (tz_str[:t], tz_str[-2:])
tz_offset = td(hours=int(tz_tuple[0]), seconds=int(tz_tuple[1]) / 60)
return tz_offset
def _calculate_tz_offset(timezone: str) -> Tuple[td, td, dt]:
"""Calculates timezone offset relative to input TARGET timezone string UTC.
Args:
timezone (str): String in format Ex. 'America/Chicago' (tz_database_name) or offset like '0400', '+0400', '-7:00', '-14:00'
Raises:
UnknownTimeZoneError: If input is not a valid tz_databse name or offset.
Returns:
Tuple[timedelta, timedelta, datetime]: tuple of offset_from_local time as a timedelta, offset from utc and local_time (datetime)
"""
local_date = dt.now()
utc_date = dt.utcnow()
diff_local_utc = local_date - utc_date
offset_utc = local_date - utc_date
if timezone == "local":
offset_local = td(seconds=0)
else:
tz_input_regex = r"(\+|\-|)([0-1]\d[0-5]\d|[0-1]\d\:[0-5]\d|\d\:[0-5]\d)"
# parses the following formats:
# America/Chicago
# +1200
# 1300
# -1200
# 8:00
# -8:00
offset_local = _extract_timezone_from_str(timezone, tz_input_regex)
if offset_local is None:
if re.search(r"\w+/\w+", timezone) is not None:
off_local = pytztimezone(timezone).localize(local_date).strftime("%z")
t = len(off_local) - 2
l_inp = (off_local[:t], off_local[-2:])
offset_utc = td(hours=int(l_inp[0]), seconds=int(l_inp[1]) / 60)
offset_local = offset_utc - diff_local_utc
else:
raise UnknownTimeZoneError(timezone)
return offset_local, offset_utc, local_date
def _validate_path(path: str) -> str:
"""Converts input path into POSIX format. Creates parent directories if necessary.
Args:
path (str): path to a file or a folder in any format
Returns:
str: path to a file or a folder in posix format
"""
output_file = Path(path).resolve().absolute()
if output_file.is_dir():
os.makedirs(output_file.as_posix(), exist_ok=True)
else:
os.makedirs(output_file.parent.as_posix(), exist_ok=True)
return output_file.as_posix()
@dataclass
class FormsiteParams:
    """Parameters class
    Stores parameters for Formsite requests.\n
    `afterref` / `beforeref` get only results greater / less than the integer you provide\n
    `afterdate` / `beforedate` get only results after / before the input you provide;
    expects a `datetime object` or string in `ISO 8601`, `yyyy-mm-dd`, `yyyy-mm-dd HH:MM:SS`\n
    `timezone` sets the timezone dates in results are relative to (also affects input
    dates); an offset string such as `+06:00` or a tz database name such as `America/Chicago`\n
    `date_format` is described on the Formsite side; defaults to `%Y-%m-%d %H:%M:%S`\n
    `resultslabels` and `resultsview`: more info on the Formsite website or the FS API of your form.\n
    `sort` ( "asc" | "desc" ) sorts results by reference number.
    """
    last: Optional[int] = None
    afterref: Optional[int] = None
    beforeref: Optional[int] = None
    afterdate: Optional[Union[str, dt]] = None
    beforedate: Optional[Union[str, dt]] = None
    timezone: Optional[str] = "local"
    resultslabels: Optional[int] = None
    resultsview: Optional[int] = 11
    sort: Optional[str] = "desc"
    def __post_init__(self):
        """Resolve timezone offsets via the `_calculate_tz_offset` helper."""
        if self.timezone is None:
            self.timezone = "local"
        offsets = _calculate_tz_offset(self.timezone)
        self.timezone_offset, self.timezone_offset_utc, self.local_datetime = offsets
        self.filters = dict()
    def get_params_as_dict(self, single_page_limit: int = 500) -> dict:
        """Generates a parameters dictionary later passed as the params= kw argument of API calls.
        Args:
            single_page_limit (int, optional): Results per page limit, 500 is maximum. Defaults to 500.
        Returns:
            dict: params dict
        """
        params = {"page": 1, "limit": single_page_limit}
        if self.afterref is not None:
            params["after_id"] = self.afterref
        if self.beforeref is not None:
            params["before_id"] = self.beforeref
        if self.afterdate is not None:
            params["after_date"] = _shift_param_date(self.afterdate, self.timezone_offset)
        if self.beforedate is not None:
            params["before_date"] = _shift_param_date(self.beforedate, self.timezone_offset)
        if self.resultsview is not None:
            # 11 = all items + statistics results view
            params["results_view"] = self.resultsview
        params.update(self.filters)
        return params
    def add_filter(self, search_type: str, col: Union[int, str], search_value: Any) -> None:
        """Get results where item with ID `col` matches `search_value` per `search_type`.
        You can edit `self.filters` directly to remove/change filters.
        Args:
            search_type (str): One of {'equals', 'contains', 'begins', 'ends'}.
            col (Union[int, str]): Column ID or metadata column name.
            search_value (Any): Value to search.
        Raises:
            ValueError: Entered invalid search type.
        """
        valid_types = {"equals", "contains", "begins", "ends"}
        if search_type not in valid_types:
            raise ValueError(f"Invalid search type entered. Must be one of {valid_types}.")
        # All four variants share the same key shape: search_<type>[<col>]
        self.filters[f"search_{search_type}[{col}]"] = search_value
    def set_filter_method(self, method: str):
        """Choose how multiple search criteria combine; defaults to 'and' if never called.
        Args:
            method (str): One of {'and', 'or'}
        Raises:
            ValueError: Entered invalid method.
        """
        valid_methods = {"and", "or"}
        if method not in valid_methods:
            raise ValueError(f"Invalid search method entered. Must be one of {valid_methods}.")
        self.filters["search_method"] = method
    def get_items_as_dict(self) -> dict:
        """Returns a dict that gets parsed as parameters by aiohttp when making a request."""
        if self.resultslabels is None:
            return {}
        if isinstance(self.resultslabels, int):
            return {"results_labels": self.resultslabels}
        raise ValueError(f"resultslabels must be an int, got '{self.resultslabels}'")
@dataclass
class FormsiteInterface:
"""A base class for interacting with the formsite API.
Documentation: https://pypi.org/project/formsite-util/
Author: https://github.com/strny0/formsite-utility
Note: You cannot change form_id, you must reinstantiate the class.
Args:
form_id (str): ID of the target form.
auth (FormsiteCredentials): Instance of the FormsiteCredentials class.
params (FormsiteParams): Instance of the FormsiteParams class.
display_progress (bool): Display tqdm progressbars. Defaults to True.
Internal variables:
`Data` (DataFrame | None): Stores results as dataframe.
`Links` (Set[str] | None): Stores all formsite upload links in a set.
`items` (Dict[str,str], None): RAW items json.
`results` (List[Dict[str,str]] | list): RAW list of results jsons.
`url_forms` (str): url for fetching info about all forms.
`url_files` (str): url for downloading files.
Methods of interest (and return types):
`FetchResults` (None): stores results in self.Data of the instance of this class.
`ReturnResults` (DataFrame): returns self.Results, if it's None, calls FetchResults first.
`WriteResults` (None): writes the dataframe to a file, if it's None, calls FetchResults first.
`ExtractLinks` (None): stores extracted links in self.Links of the instance of this class.
`ReturnLinks` (List[str]): returns a tuple of all links.
`WriteLinks` (None): writes them to a file.
`ListAllForms` (DataFrame | None): lists all forms on formsite, output them to console or save them to a file.
`ListColumns` (DataFrame | None): lists all columns and column IDs of a form you set the interface for.
`DownloadFiles` (None): downloads all files submitted to the form to a folder you specify.
`WriteLatestRef` (None): writes highest reference number in results to a file you specify.
Returns:
FormsiteInterface: An instance of the FormsiteInterface class.
"""
form_id: str
auth: FormsiteCredentials
params: FormsiteParams = FormsiteParams()
display_progress: bool | |
# -*- coding: utf-8 -*-
"""
Transformations from the breakdowns to some surface in 3-space
(sometimes 2-space)
Attributes:
PROJECTIONS: A map of maps. The first map is keyed on the name of a family
of projections, and the second is keyed on shape (3 or 4). For
instance, PROJECTIONS['nslerp'][3] gives the function to perform
nslerp on a triangle.
PARALLEL: A list of which projections are compatible with
parallel projection.
"""
import numpy as np
from numpy.linalg import norm
from . import xmath, breakdown
# Fixture data for the doctests in this module.
# Equilateral spherical triangle: the three standard basis vectors.
_TEST_EQ_TRI = np.eye(3)
# Normalized quadrilateral on the sphere.
_TEST_EQ_Q = xmath.normalize(np.array([[1, 0, 1],
                                       [0, 1, 1],
                                       [-1, 0, 1],
                                       [0, -1, 1]]))
# Non-equilateral (skew) triangle.
_TEST_SKEW_TRI = np.array([[0.8, 0.6, 0],
                           [0, 1, 0],
                           [0, 0, 1]])
# Skew quadrilateral (last vertex off the z=1 ring).
_TEST_SKEW_Q = xmath.normalize(np.array([[1, 0, 1],
                                         [0, 1, 1],
                                         [-1, 0, 1],
                                         [0, -1, 0]]))
# Sample barycentric coordinates (each row sums to 1).
_TEST_BARY = np.array([[1, 0.8, 0.6, 0.4, 0.2, 0.0],
                       [0, 0.2, 0.2, 0.2, 0.5, 0.5],
                       [0, 0.0, 0.2, 0.4, 0.3, 0.5]]).T
# Sample xy coordinates in the unit square.
_TEST_XY = np.array([[0, 1, 1, 0, 0.5, 0.5, 0.5, 0.3, 0.3, 0.3],
                     [0, 0, 1, 1, 0, 0.5, 1, 0, 0.5, 1]]).T
# Sample subdivision frequency (n, m).
_TEST_FREQ = 4, 2
# Sample linear indexes for a triangle breakdown.
_TEST_TRI_LINDEX = np.array([[0, 4, 6],
                             [1, 4, 5],
                             [2, 3, 5],
                             [3, 2, 5],
                             [2, 4, 4],
                             [3, 3, 4]])
# Sample linear indexes for a quad breakdown.
_TEST_Q_LINDEX = np.array([[2, 0],
                           [2, 1],
                           [3, 1],
                           [4, 1],
                           [1, 2],
                           [2, 2]])
# Spiral of sample points in the unit disk: complex and real-2d forms.
_TEST_DISK_C = np.exp(np.linspace(0, 2j*np.pi, 7))*np.linspace(0, 1, 7)
_TEST_DISK_R = xmath.complex_to_float2d(_TEST_DISK_C)
# Sample points on/inside the skew triangle, and their images on the sphere.
_TEST_TRI_PTS = np.array([[0.8, 0.6, 0],
                          [0, 1, 0],
                          [0, 0, 1],
                          [0.4, 0.8, 0],
                          [0, 0.5, 0.5],
                          [0.4, 0.3, 0.5],
                          [4/15, 8/15, 1/3]])
_TEST_SPHERE_PTS = xmath.normalize(_TEST_TRI_PTS)
#generic methods, valid on any-d, any shape
def square_to_quad(xy, base_pts):
    """Maps the unit square [0,1]^2 onto a (possibly skew) quadrilateral via
    bilinear interpolation of the four corner points.
    Args:
        xy: Array, shape [..., 2]. The 2d coordinates of the point.
        base_pts: Array, shape [4, ...]. The coordinates of the quadrilateral.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        Coordinates in whatever space base_pts was defined in.
    >>> square_to_quad(_TEST_XY[:, np.newaxis], _TEST_SKEW_Q)
    array([[ 0.70710678, 0. , 0.70710678],
    [ 0. , 0.70710678, 0.70710678],
    [-0.70710678, 0. , 0.70710678],
    [ 0. , -1. , 0. ],
    [ 0.35355339, 0.35355339, 0.70710678],
    [ 0. , -0.0732233 , 0.53033009],
    [-0.35355339, -0.5 , 0.35355339],
    [ 0.49497475, 0.21213203, 0.70710678],
    [ 0.14142136, -0.24393398, 0.45961941],
    [-0.21213203, -0.7 , 0.21213203]])
    """
    corner_a, corner_b, corner_c, corner_d = base_pts[:4]
    u = xy[..., 0]
    v = xy[..., 1]
    # Bilinear form: a + (b-a)u + (d-a)v + (a-b+c-d)uv
    return (corner_a
            + (corner_b - corner_a)*u
            + (corner_d - corner_a)*v
            + (corner_a - corner_b + corner_c - corner_d)*u*v)
def tri_bary(bary, base_pts):
    """Maps barycentric coordinates onto a (Euclidean) triangle
    defined by base_pts.
    Args:
        bary: Array, shape [..., 3]. Barycentric coordinates.
        base_pts: Array, shape [3, ...]. Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        Coordinates in whatever space base_pts was define in.
    >>> tri_bary(_TEST_BARY, _TEST_SKEW_TRI)
    array([[ 0.8 , 0.6 , 0. ],
    [ 0.64, 0.68, 0. ],
    [ 0.48, 0.56, 0.2 ],
    [ 0.32, 0.44, 0.4 ],
    [ 0.16, 0.62, 0.3 ],
    [ 0. , 0.5 , 0.5 ]])
    """
    # Weighted sum of the corners: contract the last axis of `bary` with the
    # first axis of `base_pts`.
    return np.tensordot(bary, base_pts, axes=(-1, 0))
#methods for disks
def square_to_disk(xy, rotation=1):
    """Transforms the square [0,1]^2 to the unit disk.
    (An alternative rotation would be np.exp(1j*np.pi/4).)
    >>> np.round(square_to_disk(_TEST_XY), 6)
    array([[-0.707107, -0.707107],
    [ 0.707107, -0.707107],
    [ 0.707107, 0.707107],
    [-0.707107, 0.707107],
    [ 0. , -1. ],
    [ 0. , 0. ],
    [ 0. , 1. ],
    [-0.371391, -0.928477],
    [-0.4 , 0. ],
    [-0.371391, 0.928477]])
    """
    # Recenter the square onto [-1,1]^2.
    centered = 2*xy - 1
    # Chebyshev (max-abs) radius keeps the square boundary on the unit circle.
    radius = np.abs(centered).max(axis=-1)
    angle = np.arctan2(centered[..., 1], centered[..., 0])
    return xmath.complex_to_float2d(radius * np.exp(1j*angle) * rotation)
# Cube roots of unity rotated by 1j: corners of an equilateral triangle
# inscribed in the unit circle with a vertex at +1j (complex form).
DISK_TRI_C = np.exp(2j*np.pi/3*np.arange(3))*1j
# Same corners as real 2d coordinates.
DISK_TRI_R = xmath.complex_to_float2d(DISK_TRI_C)
def tri_to_disk(bary, rotation=1, pts=DISK_TRI_C):
    """Transforms a triangle in barycentric coordinates to the unit disk.
    tri_naive_slerp also does this when pts are on a great circle,
    with somewhat different results.
    >>> np.round(tri_to_disk(_TEST_BARY), 6)
    array([[ 0. , 1. ],
    [-0.240192, 0.970725],
    [-0. , 0.4 ],
    [ 0.34641 , 0.2 ],
    [-0.261861, -0.302372],
    [-0. , -1. ]])
    """
    plane_pts = bary.dot(pts)
    # Radius: 0 at the centroid (min bary = 1/3), 1 on the boundary (min = 0).
    radius = 1 - 3*np.amin(bary, axis=-1)
    result = radius * np.exp(1j*np.angle(plane_pts)) * rotation
    return xmath.complex_to_float2d(result)
# Fourth roots of unity: corners of a square inscribed in the unit circle.
DISK_SQ_C = np.array([1, 1j, -1, -1j])
# Same corners as real 2d coordinates.
DISK_SQ_R = xmath.complex_to_float2d(DISK_SQ_C)
def _sq_disk(bkdn, base_pts, freq, tweak):
    """Projects a square breakdown through the unit disk onto the quad base_pts.
    (`freq` and `tweak` are accepted for interface compatibility and unused here.)"""
    # Map the breakdown onto the disk, then shrink the disk back into [0,1]^2
    # before the bilinear map onto the quadrilateral.
    disk_coords = square_to_disk(bkdn.coord)/np.sqrt(2) + 0.5
    return square_to_quad(disk_coords[:, np.newaxis], base_pts)
def _tri_disk(bkdn, base_pts, freq, tweak):
    """Projects a triangle breakdown through the unit disk onto base_pts.
    (`freq` and `tweak` are accepted for interface compatibility and unused here.)"""
    # NOTE(review): `bary_tri` is defined elsewhere in this module; presumably
    # it recovers barycentric coordinates from disk points — confirm there.
    disk_pts = tri_to_disk(bkdn.coord)
    return tri_bary(bary_tri(disk_pts, DISK_TRI_R), base_pts)
#disk -> sphere
def spherical_to_xyz(phi, theta):
    """Converts spherical coordinates to 3d xyz coordinates.
    Args:
        phi: Inclination
        theta: Azimuth
    Returns:
        An array with of shape = [..., 3].
    >>> phi = np.arccos(np.linspace(-1, 1, 7))
    >>> theta = np.arcsin(np.linspace(-1, 1, 7))
    >>> np.round(spherical_to_xyz(phi, theta), 6)
    array([[ 0. , -0. , -1. ],
    [ 0.555556, -0.496904, -0.666667],
    [ 0.888889, -0.31427 , -0.333333],
    [ 1. , 0. , 0. ],
    [ 0.888889, 0.31427 , 0.333333],
    [ 0.555556, 0.496904, 0.666667],
    [ 0. , 0. , 1. ]])
    """
    sin_phi = np.sin(phi)  # pylint: disable=no-member
    # Stack along axis 0 then transpose, so the xyz components end up last.
    components = np.array([sin_phi * np.cos(theta),
                           sin_phi * np.sin(theta),
                           np.cos(phi)])
    return components.T
def lambert(disk):
    """Converts coordinates on the disk to spherical coordinates, using
    the Lambert azimuthal equal-area projection.
    Args:
        disk: Array of shape [..., 2] representing points on the disk.
    Returns:
        phi, theta: Spherical coordinates
    >>> phi, theta = lambert(_TEST_DISK_R)
    >>> np.round(phi*180/np.pi, 2)
    array([ 0. , 19.19, 38.94, 60. , 83.62, 112.89, 180. ])
    >>> np.round(theta*180/np.pi, 2)
    array([ 0., 60., 120., 180., -120., -60., -0.])
    """
    x = disk[..., 0]
    y = disk[..., 1]
    # Equal-area projection: r = 2 sin(phi/2), hence phi = 2 asin(r/... ).
    phi = 2*np.arcsin(norm(disk, axis=-1))
    theta = np.arctan2(y, x)
    return phi, theta
def equidistant(disk):
    """Converts coordinates on the disk to spherical coordinates, using
    the azimuthal equal-distance projection.
    Args:
        disk: Array of shape [..., 2] representing points on the disk.
    Returns:
        phi, theta: Spherical coordinates
    >>> phi, theta = equidistant(_TEST_DISK_R)
    >>> np.round(phi*180/np.pi, 2)
    array([ 0., 30., 60., 90., 120., 150., 180.])
    >>> np.round(theta*180/np.pi, 2)
    array([ 0., 60., 120., 180., -120., -60., -0.])
    """
    # Radius maps linearly onto the inclination: r = 1 corresponds to phi = pi.
    phi = np.pi * norm(disk, axis=-1)
    theta = np.arctan2(disk[..., 1], disk[..., 0])
    return phi, theta
#methods for spheres
#triangles -> spherical triangle
def tri_naive_slerp(bary, base_pts):
    """
    Naive slerp (spherical linear interpolation) on a spherical triangle.
    Args:
        bary: Array, shape [..., 3]. Barycentric coordinates.
        base_pts: Array, shape [3, ..., 3]. Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> tri_naive_slerp(_TEST_BARY, _TEST_EQ_TRI)
    array([[ 1. , 0. , 0. ],
    [ 0.95105652, 0.30901699, 0. ],
    [ 0.80901699, 0.30901699, 0.30901699],
    [ 0.58778525, 0.30901699, 0.58778525],
    [ 0.30901699, 0.70710678, 0.4539905 ],
    [ 0. , 0.70710678, 0.70710678]])
    """
    # The triangle is assumed equilateral, so a single central angle suffices.
    omega = xmath.central_angle_equilateral(base_pts)
    weights = np.sin(omega * bary) / np.sin(omega)
    return weights.dot(base_pts)
def tri_areal(bary, base_pts):
    """Given a triangle and spherical areal coordinates, returns the vectors
    corresponding to those coordinates.
    Args:
        bary: Array, shape [..., 3]. Barycentric coordinates.
        base_pts: Array, shape [3, ..., 3]. Coordinates of the triangle.
        Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> tri_areal(_TEST_BARY, _TEST_SKEW_TRI)
    array([[ 0.8 , 0.6 , 0. ],
    [ 0.67564273, 0.7372292 , 0. ],
    [ 0.59542957, 0.71123145, 0.37364883],
    [ 0.42703426, 0.59682523, 0.67929477],
    [ 0.21874817, 0.81375629, 0.53847 ],
    [ 0. , 0.63544017, 0.77215011]])
    """
    base_pts = xmath.normalize(base_pts)
    # Total solid angle (spherical area) of the triangle.
    area = xmath.triangle_solid_angle(base_pts[0], base_pts[1], base_pts[2])
    # Target sub-area for each vertex: areal coordinate times the total area.
    area_i = bary * area
    base_pts_iplus1 = np.roll(base_pts, -1, axis=0)
    base_pts_iplus2 = np.roll(base_pts, 1, axis=0)
    #FIXME whytf is this commented statement not equivalent to below?
    # L = ((1 + np.cos(area_i))[:, np.newaxis]*
    # np.cross(base_pts_iplus1, base_pts_iplus2) -
    # np.sin(area_i)[:, np.newaxis]*
    # (base_pts_iplus1 + base_pts_iplus2)).transpose((0,2,1))
    # Build the 3x3 linear system one row at a time (one row per vertex).
    L0 = ((1 + np.cos(area_i[..., 0]))[..., np.newaxis]*
          np.cross(base_pts[1], base_pts[2]) -
          np.sin(area_i[..., 0])[..., np.newaxis]*
          (base_pts[1] + base_pts[2]))
    L1 = ((1 + np.cos(area_i[..., 1]))[..., np.newaxis]*
          np.cross(base_pts[2], base_pts[0]) -
          np.sin(area_i[..., 1])[..., np.newaxis]*
          (base_pts[2] + base_pts[0]))
    L2 = ((1 + np.cos(area_i[..., 2]))[..., np.newaxis]*
          np.cross(base_pts[0], base_pts[1]) -
          np.sin(area_i[..., 2])[..., np.newaxis]*
          (base_pts[0] + base_pts[1]))
    L = np.stack([L0, L1, L2], axis=-2)
    h = np.sin(area_i)*(1 + np.sum(base_pts_iplus1*base_pts_iplus2, axis=-1))
    # Solve L x = h for the point achieving the requested sub-areas.
    return np.linalg.solve(L, h)
def triangles_method2(lindex, base_pts, freq):
"""Triangles of method 2
Args:
lindex: Array, shape (..., 3). Linear indexes (should correspond
to freq)
base_pts: Array, shape (3, ..., 3). Coordinates of the triangle.
Should be in counterclockwise order to maintain orientation.
freq: 2-tuple. Frequency of the subdivision.
Returns:
Array of shape (..., 3, 3)
>>> np.round(triangles_method2(_TEST_TRI_LINDEX[1:3], _TEST_SKEW_TRI,
... _TEST_FREQ), 6)
array([[[ 0.205392, 0.183498, 0.051016],
[ 0.34558 , 0.317059, 0.10024 ],
[ 0.098942, 0.085439, 0.03013 ]],
<BLANKLINE>
[[ 0.283136, 0.377856, 0.038599],
[ 0.24816 , 0.339397, 0.042048],
[ 0.152133, 0.199126, 0.028172]]])
"""
n, m = freq
frame = breakdown.frame_triangle(n, m, base_pts=base_pts,
interp=xmath.slerp)
#get the normal to the great circle corresponding to the lines
#don't need to normalize this
gc_normals = np.cross(frame[..., 0, :], frame[..., 1, :])
index = np.arange(3)
| |
ns_prefix
    # --- generateDS-style accessors for the RegionRefIndexed child list ---
    def get_RegionRefIndexed(self):
        return self.RegionRefIndexed
    def set_RegionRefIndexed(self, RegionRefIndexed):
        self.RegionRefIndexed = RegionRefIndexed
    def add_RegionRefIndexed(self, value):
        self.RegionRefIndexed.append(value)
    def insert_RegionRefIndexed_at(self, index, value):
        self.RegionRefIndexed.insert(index, value)
    def replace_RegionRefIndexed_at(self, index, value):
        self.RegionRefIndexed[index] = value
    # --- accessors for the OrderedGroupIndexed child list ---
    def get_OrderedGroupIndexed(self):
        return self.OrderedGroupIndexed
    def set_OrderedGroupIndexed(self, OrderedGroupIndexed):
        self.OrderedGroupIndexed = OrderedGroupIndexed
    def add_OrderedGroupIndexed(self, value):
        self.OrderedGroupIndexed.append(value)
    def insert_OrderedGroupIndexed_at(self, index, value):
        self.OrderedGroupIndexed.insert(index, value)
    def replace_OrderedGroupIndexed_at(self, index, value):
        self.OrderedGroupIndexed[index] = value
    # --- accessors for the UnorderedGroupIndexed child list ---
    def get_UnorderedGroupIndexed(self):
        return self.UnorderedGroupIndexed
    def set_UnorderedGroupIndexed(self, UnorderedGroupIndexed):
        self.UnorderedGroupIndexed = UnorderedGroupIndexed
    def add_UnorderedGroupIndexed(self, value):
        self.UnorderedGroupIndexed.append(value)
    def insert_UnorderedGroupIndexed_at(self, index, value):
        self.UnorderedGroupIndexed.insert(index, value)
    def replace_UnorderedGroupIndexed_at(self, index, value):
        self.UnorderedGroupIndexed[index] = value
    # --- accessors for the 'id' and 'index' XML attributes ---
    def get_id(self):
        return self.id
    def set_id(self, id):
        self.id = id
    def get_index(self):
        return self.index
    def set_index(self, index):
        self.index = index
def hasContent_(self):
if (
self.RegionRefIndexed or
self.OrderedGroupIndexed or
self.UnorderedGroupIndexed
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='OrderedGroupIndexedType', pretty_print=True):
        """Write this element (attributes and children) as XML to outfile."""
        # A namespace definition registered for this type overrides the default.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('OrderedGroupIndexedType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name captured during parsing, if any.
        if self.original_tagname_ is not None and name_ == 'OrderedGroupIndexedType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='OrderedGroupIndexedType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='OrderedGroupIndexedType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='OrderedGroupIndexedType'):
        """Write the 'id' and 'index' XML attributes, skipping any already emitted."""
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
        if self.index is not None and 'index' not in already_processed:
            already_processed.add('index')
            outfile.write(' index="%s"' % self.gds_format_integer(self.index, input_name='index'))
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='OrderedGroupIndexedType', fromsubclass_=False, pretty_print=True):
        """Export every indexed child element, in list order per child kind."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Each child carries its own captured namespace prefix, if any.
        for RegionRefIndexed_ in self.RegionRefIndexed:
            namespaceprefix_ = self.RegionRefIndexed_nsprefix_ + ':' if (UseCapturedNS_ and self.RegionRefIndexed_nsprefix_) else ''
            RegionRefIndexed_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='RegionRefIndexed', pretty_print=pretty_print)
        for OrderedGroupIndexed_ in self.OrderedGroupIndexed:
            namespaceprefix_ = self.OrderedGroupIndexed_nsprefix_ + ':' if (UseCapturedNS_ and self.OrderedGroupIndexed_nsprefix_) else ''
            OrderedGroupIndexed_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='OrderedGroupIndexed', pretty_print=pretty_print)
        for UnorderedGroupIndexed_ in self.UnorderedGroupIndexed:
            namespaceprefix_ = self.UnorderedGroupIndexed_nsprefix_ + ':' if (UseCapturedNS_ and self.UnorderedGroupIndexed_nsprefix_) else ''
            UnorderedGroupIndexed_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UnorderedGroupIndexed', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read the 'id' and 'index' XML attributes from ``node`` into this instance."""
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('index', node)
        if value is not None and 'index' not in already_processed:
            already_processed.add('index')
            # 'index' is integer-valued; parse (with error collection) rather
            # than storing the raw string.
            self.index = self.gds_parse_integer(value, node, 'index')
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Build one child element of ``node`` and append it to the matching list.

        Unrecognized child tag names are silently ignored (generated behavior).
        """
        if nodeName_ == 'RegionRefIndexed':
            obj_ = RegionRefIndexedType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.RegionRefIndexed.append(obj_)
            obj_.original_tagname_ = 'RegionRefIndexed'
        elif nodeName_ == 'OrderedGroupIndexed':
            obj_ = OrderedGroupIndexedType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.OrderedGroupIndexed.append(obj_)
            obj_.original_tagname_ = 'OrderedGroupIndexed'
        elif nodeName_ == 'UnorderedGroupIndexed':
            obj_ = UnorderedGroupIndexedType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.UnorderedGroupIndexed.append(obj_)
            obj_.original_tagname_ = 'UnorderedGroupIndexed'
# end class OrderedGroupIndexedType
class UnorderedGroupIndexedType(GeneratedsSuper):
    """Indexed group containing unordered elements.

    'index' is the position (order number) of this item within the
    current hierarchy level.

    generateDS-style XML binding class: carries the 'id' and 'index'
    attributes plus the RegionRef / OrderedGroup / UnorderedGroup child
    element lists, with export (serialization) and build (parsing) support.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, id=None, index=None, RegionRef=None, OrderedGroup=None, UnorderedGroup=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.id = _cast(None, id)
        self.id_nsprefix_ = None
        self.index = _cast(int, index)
        self.index_nsprefix_ = None
        # Child lists default to fresh empty lists (avoids the shared
        # mutable-default pitfall).
        if RegionRef is None:
            self.RegionRef = []
        else:
            self.RegionRef = RegionRef
        self.RegionRef_nsprefix_ = None
        if OrderedGroup is None:
            self.OrderedGroup = []
        else:
            self.OrderedGroup = OrderedGroup
        self.OrderedGroup_nsprefix_ = None
        if UnorderedGroup is None:
            self.UnorderedGroup = []
        else:
            self.UnorderedGroup = UnorderedGroup
        self.UnorderedGroup_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Instantiate this class, honoring any registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, UnorderedGroupIndexedType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if UnorderedGroupIndexedType.subclass:
            return UnorderedGroupIndexedType.subclass(*args_, **kwargs_)
        else:
            return UnorderedGroupIndexedType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_RegionRef(self):
        return self.RegionRef
    def set_RegionRef(self, RegionRef):
        self.RegionRef = RegionRef
    def add_RegionRef(self, value):
        self.RegionRef.append(value)
    def insert_RegionRef_at(self, index, value):
        self.RegionRef.insert(index, value)
    def replace_RegionRef_at(self, index, value):
        self.RegionRef[index] = value
    def get_OrderedGroup(self):
        return self.OrderedGroup
    def set_OrderedGroup(self, OrderedGroup):
        self.OrderedGroup = OrderedGroup
    def add_OrderedGroup(self, value):
        self.OrderedGroup.append(value)
    def insert_OrderedGroup_at(self, index, value):
        self.OrderedGroup.insert(index, value)
    def replace_OrderedGroup_at(self, index, value):
        self.OrderedGroup[index] = value
    def get_UnorderedGroup(self):
        return self.UnorderedGroup
    def set_UnorderedGroup(self, UnorderedGroup):
        self.UnorderedGroup = UnorderedGroup
    def add_UnorderedGroup(self, value):
        self.UnorderedGroup.append(value)
    def insert_UnorderedGroup_at(self, index, value):
        self.UnorderedGroup.insert(index, value)
    def replace_UnorderedGroup_at(self, index, value):
        self.UnorderedGroup[index] = value
    def get_id(self):
        return self.id
    def set_id(self, id):
        self.id = id
    def get_index(self):
        return self.index
    def set_index(self, index):
        self.index = index
    def hasContent_(self):
        """Return True if any child element list is non-empty."""
        if (
            self.RegionRef or
            self.OrderedGroup or
            self.UnorderedGroup
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='UnorderedGroupIndexedType', pretty_print=True):
        """Write this element (attributes and children) as XML to ``outfile``.

        Self-closes the tag when there is no child content.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('UnorderedGroupIndexedType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name this object was originally parsed under.
        if self.original_tagname_ is not None and name_ == 'UnorderedGroupIndexedType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='UnorderedGroupIndexedType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='UnorderedGroupIndexedType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='UnorderedGroupIndexedType'):
        """Write the 'id' and 'index' attributes (once each) to ``outfile``."""
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
        if self.index is not None and 'index' not in already_processed:
            already_processed.add('index')
            outfile.write(' index="%s"' % self.gds_format_integer(self.index, input_name='index'))
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='UnorderedGroupIndexedType', fromsubclass_=False, pretty_print=True):
        """Serialize the RegionRef/OrderedGroup/UnorderedGroup children in order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for RegionRef_ in self.RegionRef:
            namespaceprefix_ = self.RegionRef_nsprefix_ + ':' if (UseCapturedNS_ and self.RegionRef_nsprefix_) else ''
            RegionRef_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='RegionRef', pretty_print=pretty_print)
        for OrderedGroup_ in self.OrderedGroup:
            namespaceprefix_ = self.OrderedGroup_nsprefix_ + ':' if (UseCapturedNS_ and self.OrderedGroup_nsprefix_) else ''
            OrderedGroup_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='OrderedGroup', pretty_print=pretty_print)
        for UnorderedGroup_ in self.UnorderedGroup:
            namespaceprefix_ = self.UnorderedGroup_nsprefix_ + ':' if (UseCapturedNS_ and self.UnorderedGroup_nsprefix_) else ''
            UnorderedGroup_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UnorderedGroup', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this instance from the XML element ``node`` and return self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace from the tag to obtain the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read the 'id' and 'index' XML attributes into this instance."""
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('index', node)
        if value is not None and 'index' not in already_processed:
            already_processed.add('index')
            self.index = self.gds_parse_integer(value, node, 'index')
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Build one child element and append it to the matching list."""
        if nodeName_ == 'RegionRef':
            obj_ = RegionRefType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.RegionRef.append(obj_)
            obj_.original_tagname_ = 'RegionRef'
        elif nodeName_ == 'OrderedGroup':
            obj_ = OrderedGroupType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.OrderedGroup.append(obj_)
            obj_.original_tagname_ = 'OrderedGroup'
        elif nodeName_ == 'UnorderedGroup':
            obj_ = UnorderedGroupType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.UnorderedGroup.append(obj_)
            obj_.original_tagname_ = 'UnorderedGroup'
# end class UnorderedGroupIndexedType
class RegionRefType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, regionRef=None, gds_collector_=None, **kwargs_):
        """Initialize a RegionRefType carrying only the 'regionRef' attribute."""
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.regionRef = _cast(None, regionRef)
        self.regionRef_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Instantiate RegionRefType, honoring any registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, RegionRefType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if RegionRefType.subclass:
            return RegionRefType.subclass(*args_, **kwargs_)
        else:
            return RegionRefType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        """Return the captured namespace prefix for this element."""
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        """Set the namespace prefix used when exporting this element."""
        self.ns_prefix_ = ns_prefix
    def get_regionRef(self):
        """Return the 'regionRef' attribute value."""
        return self.regionRef
    def set_regionRef(self, regionRef):
        """Set the 'regionRef' attribute value."""
        self.regionRef = regionRef
def hasContent_(self):
if (
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='RegionRefType', pretty_print=True):
        """Write this element (attributes only) as XML to ``outfile``.

        Self-closes the tag when hasContent_() reports no child content.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('RegionRefType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name this object was originally parsed under.
        if self.original_tagname_ is not None and name_ == 'RegionRefType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RegionRefType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='RegionRefType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='RegionRefType'):
| |
['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# SeriesDate
0x00080021L: {
'BASIC STRUCTURED DISPLAY IOD': ['Series'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Series'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Series'],
None: ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'SEGMENTATION IOD': ['Series'],
'BASIC VOICE AUDIO IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series'],
'GENERAL ECG IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'COLON CAD SR IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Series'],
'RAW DATA IOD': ['Series'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'ENHANCED MR IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'BASIC TEXT SR IOD': ['Series'],
'NM IMAGE IOD': ['Series'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'COMPREHENSIVE SR IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'BASIC CARDIAC EP IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'LENSOMETRY MEASUREMENTS IOD': ['Series'],
'MR SPECTROSCOPY IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Series'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Series'],
'ARTERIAL PULSE WAVEFORM IOD': ['Series'],
'CHEST CAD SR IOD': ['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'US IMAGE IOD': ['Series'],
'GENERAL AUDIO WAVEFORM IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'12-LEAD ECG IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'XRF IMAGE IOD': ['Series'],
'RESPIRATORY WAVEFORM IOD': ['Series'],
'ENHANCED SR IOD': ['Series'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'X-RAY RADIATION DOSE SR IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'MAMMOGRAPHY CAD SR IOD': ['Series'],
'PROCEDURE LOG IOD': ['Series'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Series'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Series'],
'CR IMAGE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
},
# FrameOfInterestDescription
0x00286022L: {
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# DigitalImageFormatAcquired
0x00181023L: {
'SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Equipment'],
None: ['Equipment'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED CDA IOD': ['Equipment'],
},
# IssuerOfPatientIDQualifiersSequence
0x00100024L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT ION MACHINE VERIFICATION IOD': ['Rt Ion Machine Verification'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient', 'Rt Ion Machine Verification', 'Unified Procedure Step', 'Modality Performed Procedure Step', 'Rt Conventional Machine Verification', 'General Purpose Performed Procedure Step', 'General Purpose Scheduled Procedure Step'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# PlanarConfiguration
0x00280006L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED | |
<filename>fabric_cf/actor/db/psql_database.py
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: <NAME> (<EMAIL>)
import logging
import pickle
from contextlib import contextmanager
from datetime import datetime, timezone
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from fabric_cf.actor.core.common.constants import Constants
from fabric_cf.actor.core.common.exceptions import DatabaseException
from fabric_cf.actor.db import Base, Clients, ConfigMappings, Proxies, Units, Reservations, Slices, ManagerObjects, \
Miscellaneous, Actors, Delegations
@contextmanager
def session_scope(psql_db_engine):
    """Provide a transactional scope around a series of operations.

    Yields a scoped session bound to *psql_db_engine*; commits on success,
    rolls back on any failure, and always closes the session.
    """
    session = scoped_session(sessionmaker(psql_db_engine))
    try:
        yield session
        session.commit()
    except BaseException:
        # Explicit BaseException instead of a bare ``except:`` (E722): even
        # KeyboardInterrupt/SystemExit trigger a rollback before propagating.
        session.rollback()
        raise
    finally:
        session.close()
class PsqlDatabase:
"""
Implements interface to Postgres database
"""
OBJECT_NOT_FOUND = "{} Not Found {}"
    def __init__(self, *, user: str, password: str, database: str, db_host: str, logger):
        """
        Create a database helper bound to a PostgreSQL instance.
        @param user database user name
        @param password database password
        @param database database name
        @param db_host host (and optional port) of the PostgreSQL server
        @param logger logger used for error reporting
        """
        # Engine for the PostgreSQL server at db_host using the psycopg2 DBAPI
        self.db_engine = create_engine("postgresql+psycopg2://{}:{}@{}/{}".format(user, password, db_host, database))
        self.logger = logger
    def create_db(self):
        """
        Create all tables defined on the declarative Base metadata
        """
        Base.metadata.create_all(self.db_engine)
    def set_logger(self, logger):
        """
        Replace the logger used for error reporting
        @param logger logger instance
        """
        self.logger = logger
def reset_db(self):
"""
Reset the database
"""
try:
with session_scope(self.db_engine) as session:
session.query(Clients).delete()
session.query(ConfigMappings).delete()
session.query(Proxies).delete()
session.query(Units).delete()
session.query(Delegations).delete()
session.query(Reservations).delete()
session.query(Slices).delete()
session.query(ManagerObjects).delete()
session.query(Miscellaneous).delete()
session.query(Actors).delete()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def add_actor(self, *, name: str, guid: str, act_type: int, properties):
"""
Add an actor
@param name name
@param guid guid
@param act_type actor type
@param properties pickle dump for actor instance
"""
try:
# Save the actor in the database
actor_obj = Actors(act_name=name, act_guid=guid, act_type=act_type, properties=properties)
with session_scope(self.db_engine) as session:
session.add(actor_obj)
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def update_actor(self, *, name: str, properties):
"""
Update an actor
@param name name
@param properties pickle dump for actor instance
"""
try:
with session_scope(self.db_engine) as session:
actor = session.query(Actors).filter_by(act_name=name).first()
if actor is not None:
actor.properties = properties
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def remove_actor(self, *, name: str):
"""
Remove an actor
@param name name
"""
try:
# Delete the actor in the database
with session_scope(self.db_engine) as session:
session.query(Actors).filter(Actors.act_name == name).delete()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def get_actors(self) -> list:
"""
Get all actors
@return list of actors
"""
result = []
try:
with session_scope(self.db_engine) as session:
for row in session.query(Actors).all():
actor = {'act_guid': row.act_guid, 'act_name': row.act_name, 'act_type': row.act_type,
'properties': row.properties, 'act_id': row.act_id}
result.append(actor.copy())
actor.clear()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def get_actors_by_name_and_type(self, *, actor_name: str, act_type: int) -> list:
"""
Get actors by name and actor type
@param actor_name actor name
@param act_type actor type
@return list of actors
"""
result = []
try:
with session_scope(self.db_engine) as session:
for row in session.query(Actors).filter(Actors.act_type == act_type).filter(
Actors.act_name.like(actor_name)).all():
actor = {'act_guid': row.act_guid, 'act_name': row.act_name, 'act_type': row.act_type,
'properties': row.properties, 'act_id': row.act_id}
result.append(actor.copy())
actor.clear()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def get_actors_by_name(self, *, act_name: str) -> list:
"""
Get actors by name
@param act_name actor name
@return list of actors
"""
result = []
try:
with session_scope(self.db_engine) as session:
for row in session.query(Actors).filter(Actors.act_name.like(act_name)).all():
actor = {'act_guid': row.act_guid, 'act_name': row.act_name, 'act_type': row.act_type,
'properties': row.properties, 'act_id': row.act_id}
result.append(actor.copy())
actor.clear()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def get_actor(self, *, name: str) -> dict:
"""
Get actor by name
@return actor
"""
result = {}
try:
with session_scope(self.db_engine) as session:
actor = session.query(Actors).filter_by(act_name=name).first()
if actor is not None:
result['act_guid'] = actor.act_guid
result['act_name'] = actor.act_name
result['act_type'] = actor.act_type
result['properties'] = actor.properties
result['act_id'] = actor.act_id
else:
result = None
self.logger.error("Actor: {} not found!".format(name))
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def add_miscellaneous(self, *, name: str, properties: dict):
"""
Add Miscellaneous entries
@param name name
@param properties properties
"""
try:
msc_obj = Miscellaneous(msc_path=name, properties=properties)
with session_scope(self.db_engine) as session:
session.add(msc_obj)
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def get_miscellaneous(self, *, name: str) -> dict or None:
"""
Get Miscellaneous entry
@param name name
@return entry identified by name
"""
result = {}
try:
with session_scope(self.db_engine) as session:
msc_obj = session.query(Miscellaneous).filter_by(msc_path=name).first()
if msc_obj is not None:
result = msc_obj.properties
else:
return None
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def add_manager_object(self, *, manager_key: str, properties: dict, act_id: int = None):
"""
Add mananger object
@param manager_key management object key
@param properties properties
@param act_id actor id
"""
try:
if act_id is not None:
mng_obj = ManagerObjects(mo_key=manager_key, mo_act_id=act_id, properties=properties)
else:
mng_obj = ManagerObjects(mo_key=manager_key, properties=properties)
with session_scope(self.db_engine) as session:
session.add(mng_obj)
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def remove_manager_object(self, *, manager_key: str):
"""
Remove management object
@param manager_key management object key
"""
try:
with session_scope(self.db_engine) as session:
session.query(ManagerObjects).filter(ManagerObjects.mo_key == manager_key).delete()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def remove_manager_object_by_actor(self, *, act_id: int):
"""
Remove management object by actor id
@param act_id actor id
"""
try:
with session_scope(self.db_engine) as session:
session.query(ManagerObjects).filter(ManagerObjects.mo_act_id == act_id).delete()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def get_manager_objects(self, *, act_id: int = None) -> list:
"""
Get Management objects
@param act_id actor id
@return list of objects
"""
result = []
try:
with session_scope(self.db_engine) as session:
if act_id is None:
for row in session.query(ManagerObjects).all():
mo = {'mo_act_id': row.mo_act_id, 'mo_key': row.mo_key, 'properties': row.properties,
'mo_id': row.mo_id}
result.append(mo.copy())
mo.clear()
else:
for row in session.query(ManagerObjects).filter(ManagerObjects.mo_act_id == act_id).all():
mo = {'mo_act_id': row.mo_act_id, 'mo_key': row.mo_key, 'properties': row.properties,
'mo_id': row.mo_id}
result.append(mo.copy())
mo.clear()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
    def get_manager_objects_by_actor_name(self, *, act_name: str = None) -> list:
        """
        Get Management objects belonging to the named actor
        @param act_name actor name
        @return list of objects

        NOTE(review): when the actor does not exist, get_actor() returns None
        and act_obj['act_id'] raises TypeError, which is logged and re-raised
        here — confirm callers expect that rather than an empty list.
        """
        result = []
        try:
            act_obj = self.get_actor(name=act_name)
            act_id = act_obj['act_id']
            result = self.get_manager_objects(act_id=act_id)
        except Exception as e:
            self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
            raise e
        return result
def get_manager_object(self, *, mo_key: str) -> dict:
"""
Get Management object by key
@param mo_key management object key
@return objects
"""
result = {}
try:
with session_scope(self.db_engine) as session:
mo_obj = session.query(ManagerObjects).filter_by(mo_key=mo_key).first()
if mo_obj is not None:
result['mo_id'] = mo_obj.mo_id
result['mo_key'] = mo_obj.mo_key
if mo_obj.mo_act_id is not None:
result['mo_act_id'] = mo_obj.mo_act_id
result['properties'] = mo_obj.properties
else:
raise DatabaseException(self.OBJECT_NOT_FOUND.format("Manager Object", mo_key))
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def get_manager_containers(self) -> list:
"""
Get Management object for the container i.e entry with no actor id
@return object
"""
result = []
try:
with session_scope(self.db_engine) as session:
for mo_obj in session.query(ManagerObjects).filter(ManagerObjects.mo_act_id.is_(None)).all():
mo = {'mo_id': mo_obj.mo_id, 'mo_key': mo_obj.mo_key, 'properties': mo_obj.properties}
result.append(mo.copy())
mo.clear()
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
return result
def add_slice(self, *, slc_guid: str, slc_name: str, slc_type: int, slc_state: int, lease_start: datetime = None,
lease_end: datetime = None, slc_resource_type: str, properties, slc_graph_id: str = None,
oidc_claim_sub: str = None, email: str = None, project_id: str = None):
"""
Add a slice
@param slc_guid slice id
@param slc_name slice name
@param slc_type slice type
@param slc_state slice state
@param slc_resource_type slice resource type
@param lease_start Lease Start time
@param lease_end Lease End time
@param properties pickled instance
@param slc_graph_id slice graph id
@param oidc_claim_sub User OIDC Sub
@param email User Email
@param project_id Project Id
"""
try:
slc_obj = Slices(slc_guid=slc_guid, slc_name=slc_name, slc_type=slc_type, slc_state=slc_state,
oidc_claim_sub=oidc_claim_sub, email=email, slc_resource_type=slc_resource_type,
lease_start=lease_start, lease_end=lease_end, properties=properties,
project_id=project_id)
if slc_graph_id is not None:
slc_obj.slc_graph_id = slc_graph_id
with session_scope(self.db_engine) as session:
session.add(slc_obj)
except Exception as e:
self.logger.error(Constants.EXCEPTION_OCCURRED.format(e))
raise e
def update_slice(self, *, slc_guid: str, slc_name: | |
<reponame>LourencoFernando/SMS-Project<gh_stars>0
import PySimpleGUI as sg
import interface, arquivo
import webbrowser
from fpdf import FPDF
janela_boas_vindas, janela_principal, janela_salas, janela_professores, janela_alunos, janela_cursos, janela_mensagem = interface.janela_de_boas_vinda(), None, None, None, None, None, None
while True:
window, event, values = sg.read_all_windows()
# Lógica da janela de boas vindas
if window == janela_boas_vindas:
if event == '-ICONE_FACEBOOK-':
webbrowser.open('https://www.facebook.com/profile.php?id=100006375421653')
elif event == '-ICONE_INSTAGRAM-':
webbrowser.open('https://www.instagram.com/lourencofernando1/')
elif event == '-ICONE_YOUTUBE-':
webbrowser.open('https://www.youtube.com/channel/UCDshuKtYFwsgnUwQI647S1g')
elif event == '-BOTAO_ENTRAR_SISTEMA-':
# sg.popup_timed('Janela de Sistema ainda em Desenvolvimeno')
janela_boas_vindas.hide()
janela_principal = interface.janela_menu_casa()
elif event == '-BOTAO_FECHAR-' or event == sg.WIN_CLOSED:
break
# Lógica da jannela Principal
if window == janela_principal:
if event == '-BOTAO REGISTRAR NOME DA ESCOLA-':
try:
if values['-NOME DA ESCOLA-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo em Branco', title='SMS - ERRO')
else:
try:
nome = 'arquivostxt\\Nome da Escola.txt'
arquivo.criarArquivo(nome, values['-NOME DA ESCOLA-'])
janela_principal.close()
janela_principal = interface.janela_menu_casa()
except:
sg.theme('DarkRed1')
sg.Popup('ERRO ao Registar', title='SMS - ERRO')
except:
sg.theme('DarkRed1')
sg.Popup('Impossíveç proseguir', title='SMS - ERRO')
if event == '-ICONE_SALA-':
janela_principal.hide()
janela_salas = interface.janela_menu_salas()
elif event == '-ICONE_PROFESSORES-':
janela_principal.hide()
janela_professores = interface.janela_menu_docentes()
elif event == '-ICONE_ALUNOS-':
janela_principal.hide()
janela_alunos = interface.janela_menu_aluno()
elif event == '-ICONE_CURSOS-':
janela_principal.hide()
janela_cursos = interface.janela_menu_cursos()
elif event == '-ICONE_MENSAGEM-':
janela_principal.hide()
janela_mensagem = interface.janela_menu_mensagem()
elif event == sg.WIN_CLOSED:
break
# Lógica da jannela das Salas
if window == janela_salas:
if event == '-BOTAO_CADASTRAR_NOVA_SALA-':
if values['-NOME DA SALA-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo em Branco', title='SMS - ERRO')
else:
try:
salas_cadastradas = 'arquivostxt\\Salas Cadastradas.txt'
arquivo.criarArquivoSala(salas_cadastradas, values['-NOME DA SALA-'])
arquivo_salas = open(salas_cadastradas, 'rt', encoding='utf-8')
ler_salas = arquivo_salas.read()
window['-MOSTRAR_SALAS-'].update(ler_salas)
arquivo_salas.close()
arquivo_salas = open(salas_cadastradas, 'rt', encoding='utf-8')
ler_salas = arquivo_salas.readlines()
cont_salas = 0
for salss in ler_salas:
cont_salas += 1
numero_de_salas = cont_salas
window['-SALAS_NUMERO_SALAS-'].update(f'{numero_de_salas} Salas Cdastradas')
window['-NOME DA SALA-'].update('')
except:
sg.theme('DarkRed1')
sg.Popup('ERRO ao Cadastrar', title='SMS - ERRO')
else:
sg.theme('DefaultNoMoreNagging')
sg.popup_timed('Cadastro efetuado com SUCESSO!')
elif event == '-ICONE_CASA-':
janela_salas.hide()
janela_principal = interface.janela_menu_casa()
elif event == '-ICONE_SALA-':
janela_salas.hide()
janela_salas = interface.janela_menu_salas()
elif event == '-ICONE_PROFESSORES-':
janela_salas.hide()
janela_professores = interface.janela_menu_docentes()
elif event == '-ICONE_ALUNOS-':
janela_salas.hide()
janela_alunos = interface.janela_menu_aluno()
elif event == '-ICONE_CURSOS-':
janela_salas.hide()
janela_cursos = interface.janela_menu_cursos()
elif event == '-ICONE_MENSAGEM-':
janela_salas.hide()
janela_mensagem = interface.janela_menu_mensagem()
elif event == sg.WIN_CLOSED:
break
# Lógica da jannela Professores
if window == janela_professores:
if event == '-BOTAO_CADASTRAR_NOVO_DOCENTE-':
if values['-NOME DO DOCENTE-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo \"Nome Docente\" em Branco', title='SMS - ERRO')
elif values['-DOCENTE_DISCIPLINAS-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo \"Disciplina a lecionar\" em Branco', title='SMS - ERRO')
else:
try:
docentes_cadastrados = 'arquivostxt\\Docentes Cadastrados.txt'
arquivo.criarArquivoDocente(docentes_cadastrados, values['-NOME DO DOCENTE-'], values['-DOCENTE_DISCIPLINAS-'])
arquivo_docentes = open(docentes_cadastrados, 'rt', encoding='utf-8')
ler_docentes = arquivo_docentes.read()
window['-MOSTRAR DOCENTES-'].update(ler_docentes)
arquivo_docentes.close()
arquivo_docentes = open(docentes_cadastrados, 'rt', encoding='utf-8')
ler_docentes = arquivo_docentes.readlines()
cont_docentes = 0
for salss in ler_docentes:
cont_docentes += 1
numero_de_docentes = cont_docentes
window['-DOCENTES_NUMERO_DOCENTES-'].update(f'{numero_de_docentes} Docentes Cadastrados')
except:
sg.theme('DarkRed1')
sg.Popup(f'Ocorreu um erro ao Cadastrar o Docentes \"{values["-NOME DO DOCENTE-"]}\" :(',
title='SMS - ERRO')
else:
sg.theme('DefaultNoMoreNagging')
sg.popup_timed(f'Cadastro de \"{values["-NOME DO DOCENTE-"]}\" efetuado com SUCESSO!')
if event == '-BOTAO_GERAR_PDF_DOCENTE-':
if values['-NOME DO DOCENTE-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!', title='SMS - ERRO')
elif values['-DOCENTE_DISCIPLINAS-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!', title='SMS - ERRO')
elif values['-SALÁRIO_BASE_DOCENTE-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!', title='SMS - ERRO')
elif values['-TURMAS_DOCENTE-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!', title='SMS - ERRO')
else:
try:
arquivo.GerarPDF(values['-NOME DO DOCENTE-'], values['-SALÁRIO_BASE_DOCENTE-'], values['-TURMAS_DOCENTE-'], values['-DOCENTE_DISCIPLINAS-'])
except UnicodeEncodeError:
sg.theme('DarkRed1')
sg.popup('ERRO ao gerar pdf!! :(\nCausa: UnicodeEncodeError\nPossível solução:\nPor favor não introduza o simbolo monetário no campo \"Salário Base\"', title='SMS - ERRO')
except:
sg.theme('DarkRed1')
sg.popup('ERRO ao gerar pdf!! :(\nCausa: fpdf.errors.FPDFException\nPossível Solução\nTroque o conteúdo do Campo \"Nome do Docente\", clique no botão \"update\"(para que a tela seja atualizada) ou REINICIE o programa\nMotivo: Já existe um pdf com esse mesmo nome\nContent cannot be added on a closed document :(', title='SMS - ERRO')
else:
sg.theme('DefaultNoMoreNagging')
sg.popup(f'PDF gerado com SUCESSO!\n O ficheiro encontra-se arquivado na pasta\"arquivospdf\Docente\" do SMS. :)')
elif event == '-atualizar_docentes-':
janela_professores.hide()
janela_professores = interface.janela_menu_docentes()
elif event == '-ICONE_CASA-':
janela_professores.hide()
janela_principal = interface.janela_menu_casa()
elif event == '-ICONE_SALA-':
janela_professores.hide()
janela_salas = interface.janela_menu_salas()
elif event == '-ICONE_PROFESSORES-':
janela_professores.hide()
janela_professores = interface.janela_menu_docentes()
elif event == '-ICONE_ALUNOS-':
janela_professores.hide()
janela_alunos = interface.janela_menu_aluno()
elif event == '-ICONE_CURSOS-':
janela_professores.hide()
janela_cursos = interface.janela_menu_cursos()
elif event == '-ICONE_MENSAGEM-':
janela_professores.hide()
janela_mensagem = interface.janela_menu_mensagem()
elif event == sg.WIN_CLOSED:
break
# Lógica da jannela Alunos
if window == janela_alunos:
if event == '-BOTAO_CADASTRAR_NOVO_ALUNO-':
if values['-NOME DO ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo \"Nome do Aluno\" em Branco', title='SMS - ERRO')
elif values['-IDADE_ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo \"Idade\" em Branco', title='SMS - ERRO')
elif arquivo.leiaint(values['-IDADE_ALUNO-']) is False:
sg.theme('DarkRed1')
sg.popup('ERRO: Por favor digite um número inteiro válido')
else:
try:
alunos_cadastrados = 'arquivostxt\\Alunos Cadastrados.txt'
arquivo.criarArquivoAlunos(alunos_cadastrados, values['-NOME DO ALUNO-'], values['-IDADE_ALUNO-'])
arquivo_alunos = open(alunos_cadastrados, 'rt', encoding='utf-8')
ler_alunos = arquivo_alunos.read()
window['-MOSTRAR ALUNOS-'].update(ler_alunos)
arquivo_alunos.close()
arquivo_alunos = open(alunos_cadastrados, 'rt', encoding='utf-8')
ler_alunos = arquivo_alunos.readlines()
cont_alunos = 0
for alunos in ler_alunos:
cont_alunos += 1
numero_de_alunos = cont_alunos
window['-ALUNOS_NUMERO_ALUNOS-'].update(f'{numero_de_alunos} Alunos Cadastrados')
except:
sg.theme('DarkRed1')
sg.Popup(f'Ocorreu um erro ao Cadastrar o aluno \"{values["-NOME DO ALUNO-"]}\" :(', title='SMS - ERRO')
else:
sg.theme('DefaultNoMoreNagging')
sg.popup_timed(f'Cadastro de \"{values["-NOME DO ALUNO-"]}\" efetuado com SUCESSO!')
if event == '-BOTAO_GERAR_PDF_ALUNOS-':
if values['-NOME DO ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Nome do Aluno\"', title='SMS - ERRO')
elif values['-DATA_NASCIMENTO_ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!! \n Campo\"Data de Nascimento\"', title='SMS - ERRO')
elif values['-DATA_NASCIMENTO_ALUNO-'] =='d-m-a':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!! \n Campo\"Data de Nascimento\"',
title='SMS - ERRO')
elif arquivo.leiaint(values['-IDADE_ALUNO-']) is False:
sg.theme('DarkRed1')
sg.popup('ERRO: Por favor digite um número inteiro válido.\n Campo\"Idade\"')
elif values['-IDADE_ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Idade\"', title='SMS - ERRO')
elif values['-DOCUMENTO_IDENTIFICAÇÃO_ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Nº Documento de Identificação\"', title='SMS - ERRO')
elif values['-MORADA_ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Morada\"', title='SMS - ERRO')
elif values['-POSTAL1-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Código postal\"', title='SMS - ERRO')
elif values['-POSTAL2-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Código postal\"', title='SMS - ERRO')
elif values['-TELEFONE_ALUNO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Deve preencher corretamente TODOS os Campos!!\n Campo\"Telefone\"', title='SMS - ERRO')
else:
try:
arquivo.GerarPdfAlunos(values['-NOME DO ALUNO-'], values['-DATA_NASCIMENTO_ALUNO-'], values['-IDADE_ALUNO-'], values['-DOCUMENTO_IDENTIFICAÇÃO_ALUNO-'], values['-TELEFONE_ALUNO-'], values['-MORADA_ALUNO-'], values['-POSTAL1-'], values['-POSTAL2-'])
except:
sg.theme('DarkRed1')
sg.popup('ERRO ao gerar pdf!! :(\nCausa: fpdf.errors.FPDFException\nPossível Solução\nTroque o conteúdo do Campo \"Nome do Docente\", clique no botão \"update\"(para que a tela seja atualizada) ou REINICIE o programa\nMotivo: Já existe um pdf com esse mesmo nome\nContent cannot be added on a closed document :(', title='SMS - ERRO')
else:
sg.theme('DefaultNoMoreNagging')
sg.popup(f'PDF gerado com SUCESSO!\n O ficheiro encontra-se arquivado na pasta\"arquivospdf\ alunos\" do SMS. :)')
elif event == '-atualizar_alunos-':
janela_alunos.hide()
janela_alunos = interface.janela_menu_aluno()
elif event == '-ICONE_CASA-':
janela_alunos.hide()
janela_principal = interface.janela_menu_casa()
elif event == '-ICONE_SALA-':
janela_alunos.hide()
janela_salas = interface.janela_menu_salas()
elif event == '-ICONE_PROFESSORES-':
janela_alunos.hide()
janela_professores = interface.janela_menu_docentes()
elif event == '-ICONE_ALUNOS-':
janela_alunos.hide()
janela_alunos = interface.janela_menu_aluno()
elif event == '-ICONE_CURSOS-':
janela_alunos.hide()
janela_cursos = interface.janela_menu_cursos()
elif event == '-ICONE_MENSAGEM-':
janela_alunos.hide()
janela_mensagem = interface.janela_menu_mensagem()
elif event == sg.WIN_CLOSED:
break
# Lógica da jannela Cursos
if window == janela_cursos:
if event == '-BOTAO_CADASTRAR_NOVO_CURSO-':
if values['-NOME_DO_CURSO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo \"Curso\" em Branco', title='SMS - ERRO')
elif values['-TURNO_DO_CURSO-'] == '':
sg.theme('DarkRed1')
sg.Popup('Não deve deixar o campo \"Turnos\" em Branco', title='SMS - ERRO')
else:
try:
cursos_cadastrados = 'arquivostxt\\Cursos Cadastrados.txt'
arquivo.criarArquivoCursos(cursos_cadastrados, values['-NOME_DO_CURSO-'], values['-TURNO_DO_CURSO-'])
arquivo_cursos = open(cursos_cadastrados, 'rt', encoding='utf-8')
ler_cursos = arquivo_cursos.read()
window['-MOSTRAR_CURSOS-'].update(ler_cursos)
arquivo_cursos.close()
arquivo_cursos = open(cursos_cadastrados, 'rt', encoding='utf-8')
ler_cursos = arquivo_cursos.readlines()
cont_cursos = 0
for cursos in ler_cursos:
cont_cursos += 1
numero_de_cursos = cont_cursos
window['-CURSOS_NUMERO_CURSOS-'].update(f'{numero_de_cursos} Cursos Cadastrados')
except:
sg.theme('DarkRed1')
sg.Popup(f'Ocorreu um erro ao Cadastrar o Curso \"{values["-NOME_DO_CURSO-"]}\" :(', title='SMS - ERRO')
else:
sg.theme('DefaultNoMoreNagging')
sg.popup_timed(f'Cadastro efetuado com SUCESSO!')
elif event == '-ICONE_CASA-':
janela_cursos.hide()
janela_principal = interface.janela_menu_casa()
elif event == '-ICONE_SALA-':
janela_cursos.hide()
janela_salas = interface.janela_menu_salas()
elif event == '-ICONE_PROFESSORES-':
janela_cursos.hide()
janela_professores = interface.janela_menu_docentes()
elif event == '-ICONE_ALUNOS-':
janela_cursos.hide()
janela_alunos = interface.janela_menu_aluno()
elif event == '-ICONE_CURSOS-':
janela_cursos.hide()
janela_cursos = interface.janela_menu_cursos()
elif event == '-ICONE_MENSAGEM-':
janela_cursos.hide()
janela_mensagem = interface.janela_menu_mensagem()
elif event == sg.WIN_CLOSED:
break
# Lógica da jannela Mensagens
if window | |
<gh_stars>0
"""Elements reflecting output objects."""
import datetime as dt
import functools
import os
import smtplib
import sqlalchemy as sql
from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .database import Database
from .record import Record
from .utils import py_dir
def you_shall_not_pass(func):
    """Prevent access to an inactive output.

    The wrapped method runs only while the output's ``status`` is exactly
    ``True``; otherwise the call is a silent no-op returning ``None``.
    """
    @functools.wraps(func)
    def wrapper(output, *args, **kwargs):
        if output.status is True:
            # Forward the wrapped call's result instead of discarding it,
            # so gated methods can still return values to their callers.
            return func(output, *args, **kwargs)
    return wrapper
class Output():
    """Base class for all outputs.

    Parameters
    ----------
    status : bool, optional
        Initial open/closed state of the output.

    Attributes
    ----------
    status : bool
        Whether this output is currently active.
    """

    def __init__(self, status=False):
        self._status = status

    @property
    def status(self):
        """Current output status."""
        return self._status

    def open(self):
        """Activate this output."""
        self._status = True

    def close(self):
        """Deactivate this output."""
        self._status = False
class Branch(Output):
    """Output branch.

    Base class for each high-level output object (console, file, email,
    database table, HTML document) hanging off an output root.

    Parameters
    ----------
    root : Root
        Output root this branch is attached to.
    status : bool
        Initial open/closed state of the branch.

    Attributes
    ----------
    root : Root
        Low-level output that is a root of this branch.
    status : bool
        Status for this particular output.
    """

    def __init__(self, root, status=False):
        super().__init__(status=status)
        self._root = root

    @property
    def root(self):
        """Low-level output that is a root of this branch."""
        return self._root
class Root(Output):
    """Output root.

    Low-level output object that bridges logger inputs and the high-level
    outputs: console, file, email, HTML document and database table.  The
    constructor creates those outputs as attributes (`console`, `file`,
    `email`, `html`, `table`).

    Parameters
    ----------
    logger : Logger
        The `Logger` that owns this output root.
    status : bool, optional
        The overall status of the `Root`.
    console, file, email, html, table : bool, optional
        Status flags forwarded to the corresponding output classes.
    directory, filename, extension : str, optional
        Forwarded to `File` as its `dir`, `name` and `ext` arguments.
    smtp : dict, optional
        Keyword arguments forwarded to `Email` (`address`, `host`, `port`,
        `tls`, `user`, `password`, `recipients`).
    db : dict, optional
        Keyword arguments forwarded to `Table` (`vendor`, `host`, `port`,
        `sid`, `user`, `password`, `schema`, `table`, `proxy`, `db`,
        `date_column`).

    Attributes
    ----------
    logger : Logger
        The `Logger` object that owns this output.
    console : Console
    file : File
    email : Email
    html : HTML
    table : Table
    """

    def __init__(self, logger, status=True, console=True, file=True,
                 email=False, html=False, table=False, directory=None,
                 filename=None, extension=None, smtp=None, db=None):
        super().__init__(status=status)
        self.logger = logger
        self.console = Console(self, status=console)
        path = dict(dir=directory, name=filename, ext=extension)
        self.file = File(self, status=file, **path)
        self.html = HTML(self, status=html)
        # Tolerate non-dict values by falling back to empty settings
        # (idiomatic truth test instead of `isinstance(...) is True`).
        smtp = smtp if isinstance(smtp, dict) else {}
        self.email = Email(self, status=email, **smtp)
        db = db if isinstance(db, dict) else {}
        self.table = Table(self, status=table, **db)

    @you_shall_not_pass
    def write(self, record):
        """Send received record to all writable outputs.

        Parameters
        ----------
        record : str or Record
            The data to write; a `Record` is rendered to text first via
            its `create()` method.
        """
        if isinstance(record, Record):
            record = record.create()
        self.console.write(record)
        self.file.write(record)
        self.html.write(record)
class Console(Branch):
    """Console output branch.

    Parameters
    ----------
    root : Root
        Output root this branch is attached to.
    status : bool
        Initial open/closed state of the branch.

    Attributes
    ----------
    root : Root
        Low-level output that is a root of this branch.
    status : bool
        Status for this particular output.
    """

    @you_shall_not_pass
    def write(self, record):
        """Emit *record* to stdout without appending a newline.

        Parameters
        ----------
        record : str
            The text to print; records already carry their own newlines.
        """
        print(record, end='')
class File(Branch):
    """Represents file output.

    Parameters
    ----------
    root : Output
        Used to set `root` attribute.
    status : bool, optional
        Used to open or close the output.
    dir : str, optional
        Used to set `dir` attribute.
    name : str, optional
        Used to set `name` attribute.
    ext : str, optional
        Used to set `ext` attribute.

    Attributes
    ----------
    root : Root
        Low-level output that is a root of this branch.
    status : bool
        Status for this particular output.
    dir : str
        The path to folder in which output file must be created.
        By default we use the *logs* folder in current location.
    name : str
        The name of output file. By default we use the string representing
        the start date of logging in format *YYYYMMDDHHMISS*.
    ext : str
        The extension of output file. By default we use *log* extension.
    """

    def __init__(self, root, status=True, dir=None, name=None, ext=None):
        super().__init__(root, status=status)
        # Fall back to ./logs, a start-date-based name and the .log extension.
        dir = dir or os.path.join(py_dir, 'logs')
        name = name or '{root.logger.start_date:%Y%m%d%H%M%S}'
        ext = ext or 'log'
        self.configure(dir=dir, name=name, ext=ext)
        pass

    @property
    def path(self):
        """Return absolute path to output file."""
        return self._path

    @property
    def modified(self):
        """Return last time when file was modified."""
        return self._modified

    @property
    def size(self):
        """Return current file size."""
        return self._size

    def configure(self, dir=None, name=None, ext=None):
        """Change output file parameters and reopen the output file.

        Parameters
        ----------
        dir : str, optional
            Path to the folder in which the output file must be created.
        name : str, optional
            Name of the output file; may contain `{root...}` / `{datetime...}`
            placeholders expanded in `new()`.
        ext : str, optional
            Extension of the output file.
        """
        if isinstance(dir, str) is True:
            self.dir = dir
        if isinstance(name, str) is True:
            self.name = name
        if isinstance(ext, str) is True:
            self.ext = ext
        # Any change to the path components requires opening a new file.
        if dir is not None or name is not None or ext is not None:
            self.new()
        pass

    @you_shall_not_pass
    def new(self):
        """Open new output file.

        NOTE(review): this method is gated by `you_shall_not_pass`, so when
        the output is closed `self._path` is never set — `write()` would then
        fail on a freshly constructed closed File; confirm intended.
        """
        # Define new path; `{root...}`/`{datetime...}` placeholders in the
        # configured name or dir are expanded here.
        head = self.dir
        tail = f'{self.name}.{self.ext}'
        datetime = self.root.logger.start_date
        path = os.path.join(head, tail)
        self._path = path.format(root=self.root, datetime=datetime)
        # Handler and file statistics must be purged; the next write()
        # lazily reopens the file at the new path.
        self.__handler = None
        self._modified = None
        self._size = None
        pass

    @you_shall_not_pass
    def write(self, record):
        """Write data to output file.

        Built-in Python file handling will be used.
        This method will also:
        - Create the output file path if it does not exist yet.
        - Update the output file modify-time and current-size attributes.

        NOTE(review): the handle is opened lazily in append mode and never
        explicitly closed; it is released only when replaced by `new()` or
        at interpreter exit.

        Parameters
        ----------
        record : str
            The string that must be written to file.
        """
        # Create path and open file handler if it is not opened yet.
        if self.__handler is None:
            # Check the directories.
            dirname = os.path.dirname(self._path)
            if os.path.exists(dirname) is False:
                os.makedirs(dirname)
            # Make file.
            self.__handler = open(self._path, 'a')
        # We should write to handler only string values.
        # So if data presented as record.Record() object it must be converted
        # to string value by using Record.create() method.
        self.__handler.write(record)
        self.__handler.flush()
        # Update statistics that are required for other logger functionality.
        self._modified = dt.datetime.now()
        self._size = os.stat(self._path).st_size
        pass
class Email(Branch):
"""Represents email output.
Gives access to SMTP server and email objects used to send messages,
notifications and alarms.
Parameters
----------
root : Output
Used to set `root` attribute.
status : bool, optional
Used to open or close the output.
address : str, optional
Used to set the `email` attribute.
host : str, optional
Used to set the `host` attribute.
port : str or int, optional
Used to set the `port` attribute.
tls : bool, optional
Used to set `tls` attribute.
user : str, optional
Used to set `user` attribute.
password : str, optional
Used to pass `password` argument to `connect()` method.
recipients : str or list, optional
Used to | |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import collections
import glob
import json
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
import unittest
import warnings
from six import PY3, StringIO, b, text_type
from werkzeug.test import Client
from werkzeug.wrappers import Response
import sass
import sassc
from sassutils.builder import Manifest, build_directory
from sassutils.wsgi import SassMiddleware
# On platforms with a non-'/' native separator (i.e. Windows), normalize
# paths to the alternate '/' form so they compare equal to the expected
# fixture strings; on POSIX the helpers are identity functions.
if os.sep != '/' and os.altsep:
    def normalize_path(path):
        """Return an absolute path with os.sep replaced by os.altsep."""
        path = os.path.abspath(os.path.normpath(path))
        return path.replace(os.sep, os.altsep)

    def normalize_source_map_path(path):
        """To workaround strange path separators made by libsass ---
        which seems a bug of libsass on win32.
        """
        return path.replace(os.altsep, '//')
else:
    def normalize_path(path):
        """Identity: POSIX paths already use '/'."""
        return path
    # Source-map paths need no rewriting on POSIX either.
    normalize_source_map_path = normalize_path
A_EXPECTED_CSS = '''\
body {
background-color: green; }
body a {
color: blue; }
'''
A_EXPECTED_CSS_WITH_MAP = '''\
/* line 6, SOURCE */
body {
background-color: green; }
/* line 8, SOURCE */
body a {
color: blue; }
/*# sourceMappingURL=../a.scss.css.map */'''
A_EXPECTED_MAP = {
'version': 3,
'file': 'test/a.css',
'sources': [normalize_source_map_path('test/a.scss')],
'names': [],
'mappings': ';AAKA;EAHE,kBAAkB;;EAIpB,KAAK;IAED,OAAO'
}
B_EXPECTED_CSS = '''\
b i {
font-size: 20px; }
'''
B_EXPECTED_CSS_WITH_MAP = '''\
/* line 2, SOURCE */
b i {
font-size: 20px; }
/*# sourceMappingURL=../css/b.scss.css.map */'''
C_EXPECTED_CSS = '''\
body {
background-color: green; }
body a {
color: blue; }
h1 a {
color: green; }
'''
D_EXPECTED_CSS = '''\
body {
background-color: green; }
body a {
font: '나눔고딕', sans-serif; }
'''
D_EXPECTED_CSS_WITH_MAP = '''\
/* line 6, SOURCE */
body {
background-color: green; }
/* line 8, SOURCE */
body a {
font: '나눔고딕', sans-serif; }
/*# sourceMappingURL=../css/d.scss.css.map */'''
E_EXPECTED_CSS = '''\
a {
color: red; }
'''
SUBDIR_RECUR_EXPECTED_CSS = '''\
body p {
color: blue; }
'''
utf8_if_py3 = {'encoding': 'utf-8'} if PY3 else {}
class SassTestCase(unittest.TestCase):
    """Tests for module-level attributes and helpers of the sass module."""

    def test_version(self):
        # Version string must be plain MAJOR.MINOR.PATCH.
        assert re.match(r'^\d+\.\d+\.\d+$', sass.__version__)

    def test_output_styles(self):
        # collections.Mapping disappeared in Python 3.10; only check the
        # mapping interface when it is still available.
        if hasattr(collections, 'Mapping'):
            assert isinstance(sass.OUTPUT_STYLES, collections.Mapping)
        assert 'nested' in sass.OUTPUT_STYLES

    def test_and_join(self):
        cases = [
            ('Korea, Japan, China, and Taiwan',
             ['Korea', 'Japan', 'China', 'Taiwan']),
            ('Korea, and Japan', ['Korea', 'Japan']),
            ('Korea', ['Korea']),
            ('', []),
        ]
        for expected, words in cases:
            self.assertEqual(expected, sass.and_join(words))
class CompileTestCase(unittest.TestCase):
    """Tests for sass.compile(): argument validation, string/file input,
    source maps, and regressions."""

    def test_compile_required_arguments(self):
        # Exactly one of string=/filename=/dirname= must be supplied.
        self.assertRaises(TypeError, sass.compile)

    def test_compile_takes_only_keywords(self):
        self.assertRaises(TypeError, sass.compile, 'a { color: blue; }')

    def test_compile_exclusive_arguments(self):
        # string=/filename=/dirname= are mutually exclusive.
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }', filename='test/a.scss')
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }', dirname='test/')
        self.assertRaises(TypeError, sass.compile,
                          filename='test/a.scss', dirname='test/')

    def test_compile_invalid_output_style(self):
        # Non-string values raise TypeError; unknown names raise ValueError.
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }',
                          output_style=['compact'])
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }', output_style=123j)
        self.assertRaises(ValueError, sass.compile,
                          string='a { color: blue; }', output_style='invalid')

    def test_compile_invalid_source_comments(self):
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }',
                          source_comments=['line_numbers'])
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }', source_comments=123j)
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }',
                          source_comments='invalid')

    def test_compile_invalid_image_path(self):
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }', image_path=[])
        self.assertRaises(TypeError, sass.compile,
                          string='a { color: blue; }', image_path=123)

    def test_compile_string(self):
        actual = sass.compile(string='a { b { color: blue; } }')
        assert actual == 'a b {\n  color: blue; }\n'
        # source_comments=True prefixes each rule with a /* line N */ marker.
        commented = sass.compile(string='''a {
            b { color: blue; }
            color: red;
        }''', source_comments=True)
        assert commented == '''/* line 1, stdin */
a {
  color: red; }
/* line 2, stdin */
a b {
  color: blue; }
'''
        # Non-ASCII input must round-trip.
        actual = sass.compile(string=u'a { color: blue; } /* 유니코드 */')
        self.assertEqual(
            u'''a {
  color: blue; }
/* 유니코드 */''',
            actual
        )
        self.assertRaises(sass.CompileError, sass.compile,
                          string='a { b { color: blue; }')
        # sass.CompileError should be a subtype of ValueError
        self.assertRaises(ValueError, sass.compile,
                          string='a { b { color: blue; }')
        self.assertRaises(TypeError, sass.compile, string=1234)
        self.assertRaises(TypeError, sass.compile, string=[])

    def test_compile_string_deprecated_source_comments_line_numbers(self):
        # source_comments='line_numbers' is deprecated in favour of
        # source_comments=True; output must be identical and a warning raised.
        source = '''a {
            b { color: blue; }
            color: red;
        }'''
        expected = sass.compile(string=source, source_comments=True)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            actual = sass.compile(string=source,
                                  source_comments='line_numbers')
        self.assertEqual(1, len(w))
        assert issubclass(w[-1].category, DeprecationWarning)
        self.assertEqual(expected, actual)

    def test_compile_filename(self):
        actual = sass.compile(filename='test/a.scss')
        assert actual == A_EXPECTED_CSS
        actual = sass.compile(filename='test/c.scss')
        assert actual == C_EXPECTED_CSS
        actual = sass.compile(filename='test/d.scss')
        # Python 2 returns bytes-backed text; decode the fixture accordingly.
        if text_type is str:
            self.assertEqual(D_EXPECTED_CSS, actual)
        else:
            self.assertEqual(D_EXPECTED_CSS.decode('utf-8'), actual)
        actual = sass.compile(filename='test/e.scss')
        assert actual == E_EXPECTED_CSS
        self.assertRaises(IOError, sass.compile,
                          filename='test/not-exist.sass')
        self.assertRaises(TypeError, sass.compile, filename=1234)
        self.assertRaises(TypeError, sass.compile, filename=[])

    def test_compile_source_map(self):
        # With source_map_filename, compile() returns (css, source_map_json).
        filename = 'test/a.scss'
        actual, source_map = sass.compile(
            filename=filename,
            source_map_filename='a.scss.css.map'
        )
        self.assertEqual(
            A_EXPECTED_CSS_WITH_MAP.replace(
                'SOURCE',
                normalize_path(os.path.abspath(filename))
            ),
            actual
        )
        self.assertEqual(
            A_EXPECTED_MAP,
            json.loads(source_map)
        )

    def test_compile_source_map_deprecated_source_comments_map(self):
        # source_comments='map' is deprecated; must match the new spelling
        # and emit a DeprecationWarning.
        filename = 'test/a.scss'
        expected, expected_map = sass.compile(
            filename=filename,
            source_map_filename='a.scss.css.map'
        )
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            actual, actual_map = sass.compile(
                filename=filename,
                source_comments='map',
                source_map_filename='a.scss.css.map'
            )
        self.assertEqual(1, len(w))
        assert issubclass(w[-1].category, DeprecationWarning)
        self.assertEqual(expected, actual)
        self.assertEqual(expected_map, actual_map)

    def test_regression_issue_2(self):
        actual = sass.compile(string='''
            @media (min-width: 980px) {
                a {
                    color: red;
                }
            }
        ''')
        # Whitespace-insensitive comparison: only the structure matters here.
        normalized = re.sub(r'\s+', '', actual)
        assert normalized == '@media(min-width:980px){a{color:red;}}'

    def test_regression_issue_11(self):
        actual = sass.compile(string='''
            $foo: 3;
            @media (max-width: $foo) {
                body { color: black; }
            }
        ''')
        normalized = re.sub(r'\s+', '', actual)
        assert normalized == '@media(max-width:3){body{color:black;}}'
class BuilderTestCase(unittest.TestCase):
    """Tests for sassutils.builder.build_directory()."""

    def setUp(self):
        # Compile against a throwaway copy of the test/ sass sources so the
        # build never pollutes the repository.
        self.temp_path = tempfile.mkdtemp()
        self.sass_path = os.path.join(self.temp_path, 'sass')
        self.css_path = os.path.join(self.temp_path, 'css')
        shutil.copytree('test', self.sass_path)

    def tearDown(self):
        shutil.rmtree(self.temp_path)

    def _read_css(self, filename):
        """Read a built CSS file from the output directory as UTF-8 text."""
        with open(os.path.join(self.css_path, filename), **utf8_if_py3) as f:
            return f.read()

    def test_builder_build_directory(self):
        # Table-driven check replacing six copy-pasted open/read/compare
        # stanzas: map each source file to its target name and expected CSS.
        result_files = build_directory(self.sass_path, self.css_path)
        self.assertEqual(6, len(result_files))
        expected = {
            'a.scss': ('a.scss.css', A_EXPECTED_CSS),
            'b.scss': ('b.scss.css', B_EXPECTED_CSS),
            'c.scss': ('c.scss.css', C_EXPECTED_CSS),
            'd.scss': ('d.scss.css', D_EXPECTED_CSS),
            'e.scss': ('e.scss.css', E_EXPECTED_CSS),
            os.path.join('subdir', 'recur.scss'): (
                os.path.join('subdir', 'recur.scss.css'),
                SUBDIR_RECUR_EXPECTED_CSS,
            ),
        }
        for source, (target, expected_css) in expected.items():
            self.assertEqual(target, result_files[source])
            self.assertEqual(expected_css, self._read_css(target))

    def test_output_style(self):
        # output_style='compressed' collapses each rule onto a single line.
        result_files = build_directory(self.sass_path, self.css_path,
                                       output_style='compressed')
        self.assertEqual(6, len(result_files))
        self.assertEqual('a.scss.css', result_files['a.scss'])
        self.assertEqual('body{background-color:green}body a{color:blue}',
                         self._read_css('a.scss.css'))
class ManifestTestCase(unittest.TestCase):
    """Tests for sassutils Manifest: spec normalization and per-file builds."""

    def test_normalize_manifests(self):
        # Three accepted spellings: a bare sass path, a (sass, css) pair,
        # and an explicit Manifest instance.
        manifests = Manifest.normalize_manifests({
            'package': 'sass/path',
            'package.name': ('sass/path', 'css/path'),
            'package.name2': Manifest('sass/path', 'css/path')
        })
        assert len(manifests) == 3
        assert isinstance(manifests['package'], Manifest)
        assert manifests['package'].sass_path == 'sass/path'
        # With only a sass path given, css_path falls back to the same path.
        assert manifests['package'].css_path == 'sass/path'
        assert isinstance(manifests['package.name'], Manifest)
        assert manifests['package.name'].sass_path == 'sass/path'
        assert manifests['package.name'].css_path == 'css/path'
        assert isinstance(manifests['package.name2'], Manifest)
        assert manifests['package.name2'].sass_path == 'sass/path'
        assert manifests['package.name2'].css_path == 'css/path'

    def test_build_one(self):
        """Manifest.build_one() writes css and, on request, source maps."""
        d = tempfile.mkdtemp()
        src_path = os.path.join(d, 'test')
        # Helpers to substitute the absolute, normalized source path into
        # the expected-output templates (their 'SOURCE' placeholder).
        test_source_path = lambda *path: normalize_path(
            os.path.join(d, 'test', *path)
        )
        replace_source_path = lambda s, name: s.replace(
            'SOURCE',
            test_source_path(name)
        )
        try:
            shutil.copytree('test', src_path)
            m = Manifest(sass_path='test', css_path='css')
            # Plain build: just the css file.
            m.build_one(d, 'a.scss')
            with open(os.path.join(d, 'css', 'a.scss.css')) as f:
                self.assertEqual(A_EXPECTED_CSS, f.read())
            # Build with source map: css plus a .map JSON next to it.
            m.build_one(d, 'b.scss', source_map=True)
            with open(os.path.join(d, 'css', 'b.scss.css'),
                      **utf8_if_py3) as f:
                self.assertEqual(
                    replace_source_path(B_EXPECTED_CSS_WITH_MAP, 'b.scss'),
                    f.read()
                )
            self.assert_json_file(
                {
                    'version': 3,
                    'file': '../test/b.css',
                    'sources': [normalize_source_map_path('../test/b.scss')],
                    'names': [],
                    'mappings': ';AAAA,EAAE;EAEE,WAAW'
                },
                os.path.join(d, 'css', 'b.scss.css.map')
            )
            m.build_one(d, 'd.scss', source_map=True)
            with open(os.path.join(d, 'css', 'd.scss.css'),
                      **utf8_if_py3) as f:
                self.assertEqual(
                    replace_source_path(D_EXPECTED_CSS_WITH_MAP, 'd.scss'),
                    f.read()
                )
            self.assert_json_file(
                {
                    'version': 3,
                    'file': '../test/d.css',
                    'sources': [normalize_source_map_path('../test/d.scss')],
                    'names': [],
                    'mappings': ';AAKA;EAHE,kBAAkB;;EAIpB,KAAK;IAED,MAAM'
                },
                os.path.join(d, 'css', 'd.scss.css.map')
            )
        finally:
            shutil.rmtree(d)

    def assert_json_file(self, expected, filename):
        """Assert `filename` parses as JSON equal to `expected`; on a parse
        failure, re-raise with the raw file content attached for debugging.
        """
        with open(filename) as f:
            try:
                tree = json.load(f)
            except ValueError as e:
                f.seek(0)
                msg = '{0!s}\n\n{1}:\n\n{2}'.format(e, filename, f.read())
                raise ValueError(msg)
            self.assertEqual(expected, tree)
class WsgiTestCase(unittest.TestCase):
    """Tests for SassMiddleware, the on-the-fly compiling WSGI wrapper."""

    @staticmethod
    def sample_wsgi_app(environ, start_response):
        # Minimal app: echoes the request path back as the body.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return environ['PATH_INFO'],

    def test_wsgi_sass_middleware(self):
        css_dir = tempfile.mkdtemp()
        src_dir = os.path.join(css_dir, 'src')
        shutil.copytree('test', src_dir)
        try:
            app = SassMiddleware(self.sample_wsgi_app, {
                __name__: (src_dir, css_dir, '/static')
            })
            client = Client(app, Response)
            # Paths outside /static fall through to the wrapped app.
            r = client.get('/asdf')
            self.assertEqual(200, r.status_code)
            self.assert_bytes_equal(b'/asdf', r.data)
            self.assertEqual('text/plain', r.mimetype)
            # *.scss.css under /static is compiled on the fly.
            r = client.get('/static/a.scss.css')
            self.assertEqual(200, r.status_code)
            src_path = normalize_path(os.path.join(src_dir, 'a.scss'))
            self.assert_bytes_equal(
                b(A_EXPECTED_CSS_WITH_MAP.replace('SOURCE', src_path)),
                r.data
            )
            self.assertEqual('text/css', r.mimetype)
            # A missing source also falls through to the wrapped app.
            r = client.get('/static/not-exists.sass.css')
            self.assertEqual(200, r.status_code)
            self.assert_bytes_equal(b'/static/not-exists.sass.css', r.data)
            self.assertEqual('text/plain', r.mimetype)
        finally:
            shutil.rmtree(css_dir)

    def assert_bytes_equal(self, expected, actual, *args):
        # Normalize Windows line endings before comparing.
        self.assertEqual(expected.replace(b'\r\n', b'\n'),
                         actual.replace(b'\r\n', b'\n'),
                         *args)
class DistutilsTestCase(unittest.TestCase):
    """Tests for the ``build_sass`` distutils command using testpkg."""

    def tearDown(self):
        # Remove any CSS a build produced so tests stay independent.
        for built in self.list_built_css():
            os.remove(built)

    def css_path(self, *args):
        """Path under testpkg's static css directory."""
        return os.path.join(
            os.path.dirname(__file__),
            'testpkg', 'testpkg', 'static', 'css',
            *args
        )

    def list_built_css(self):
        """All built *.scss.css files in the css directory."""
        return glob.glob(self.css_path('*.scss.css'))

    def build_sass(self, *args):
        """Run ``setup.py build_sass`` inside testpkg; return its exit code."""
        testpkg = os.path.join(os.path.dirname(__file__), 'testpkg')
        cmd = [sys.executable, 'setup.py', 'build_sass'] + list(args)
        return subprocess.call(cmd, cwd=os.path.abspath(testpkg))

    def test_build_sass(self):
        self.assertEqual(0, self.build_sass())
        self.assertEqual(
            ['a.scss.css'],
            [os.path.basename(p) for p in self.list_built_css()]
        )
        with open(self.css_path('a.scss.css')) as f:
            self.assertEqual(
                'p a {\n color: red; }\np b {\n color: blue; }\n',
                f.read()
            )

    def test_output_style(self):
        self.assertEqual(0, self.build_sass('--output-style', 'compressed'))
        with open(self.css_path('a.scss.css')) as f:
            self.assertEqual(
                'p a{color:red}p b{color:blue}',
                f.read()
            )
class SasscTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO()
self.err = StringIO()
def test_no_args(self):
exit_code = sassc.main(['sassc', ], self.out, self.err)
self.assertEqual(2, exit_code)
err = self.err.getvalue()
assert err.strip().endswith('error: too few arguments'), \
'actual error message is: ' + repr(err)
self.assertEqual('', self.out.getvalue())
def test_three_args(self):
exit_code = sassc.main(
['sassc', 'a.scss', 'b.scss', 'c.scss'],
self.out, self.err
)
self.assertEqual(2, exit_code)
err = self.err.getvalue()
assert err.strip().endswith('error: too many arguments'), \
'actual error message is: ' + repr(err)
self.assertEqual('', self.out.getvalue())
def test_sassc_stdout(self):
exit_code = sassc.main(['sassc', 'test/a.scss'], self.out, self.err)
self.assertEqual(0, exit_code)
self.assertEqual('', self.err.getvalue())
self.assertEqual(A_EXPECTED_CSS.strip(), self.out.getvalue().strip())
def test_sassc_output(self):
fd, tmp = tempfile.mkstemp('.css')
try:
os.close(fd)
exit_code = sassc.main(['sassc', 'test/a.scss', tmp],
self.out, self.err)
self.assertEqual(0, exit_code)
self.assertEqual('', self.err.getvalue())
self.assertEqual('', self.out.getvalue())
with open(tmp) as f:
self.assertEqual(A_EXPECTED_CSS.strip(), f.read().strip())
finally:
os.remove(tmp)
def test_sassc_output_unicode(self):
| |
<reponame>guoyi118/sparqling-queries
import os
import ast
import unittest
import time
import attr
from timeout_decorator import timeout
import textwrap
from functools import lru_cache
from qdmr2sparql.datasets import QdmrInstance, DatasetBreak, DatasetSpider
from qdmr2sparql.structures import GroundingIndex, GroundingKey, RdfGraph
from qdmr2sparql.structures import QueryResult, QueryToRdf, OutputColumnId
from qdmr2sparql.query_generator import create_sparql_query_from_qdmr
ONE_TEST_TIMEOUT = 120  # per-test limit in seconds, enforced by @timeout
# Endpoint passed to QueryResult.execute_query_to_rdf as virtuoso_server;
# None presumably means queries run locally without a Virtuoso server —
# confirm in qdmr2sparql.structures.QueryResult.
VIRTUOSO_SPARQL_SERVICE = None
class TestSelect(unittest.TestCase):
    """QDMR "select" steps grounded to a table, a column, or a value,
    validated against hand-written reference SPARQL over concert_singer.
    """

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_table(self):
        """When selecting a full table we return the set of primary keys."""
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        # Reference query: each table row is represented by its primary key.
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?singer
        WHERE
        {
        ?singer arc:singer:Singer_ID ?singer.
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_table_grounding("singer"), schema)])
        qdmr = QdmrInstance(["select"], [["singers"]])
        grounding = {GroundingIndex(0,0,"singers") : GroundingKey.make_table_grounding("singer")}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column(self):
        """When selecting a column we return the items of that column."""
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name"))])
        qdmr = QdmrInstance(["select"], [["name"]])
        grounding = {GroundingIndex(0,0,"name") : GroundingKey.make_column_grounding("singer", "Name")}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_value(self):
        """When selecting a value we return all entries equal to that value
        in its column.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?countries
        WHERE
        {
        ?singer arc:singer:Country ?countries.
        FILTER(?countries = "France"^^xsd:string).
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_value_grounding("singer", "Country", "France"))])
        qdmr = QdmrInstance(["select"], [["France"]])
        grounding = {GroundingIndex(0,0,"France") : GroundingKey.make_value_grounding("singer", "Country", "France")}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)
class TestSelectProject(unittest.TestCase):
    """QDMR select+project pipelines over all combinations of
    table/column/value groundings.
    """

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_table_project_column(self):
        """Select table, project column should return the column."""
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?countries
        WHERE
        {
        ?singer arc:singer:Country ?countries.
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Country"))])
        qdmr = QdmrInstance(["select", "project"], [["singers"], ["countries", "#1"]])
        grounding = { GroundingIndex(0,0,"singers") : GroundingKey.make_table_grounding("singer"),
            GroundingIndex(1,0,"countries") : GroundingKey.make_column_grounding("singer", "Country")
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column_project_table(self):
        """Select column, project table should return the table's rows
        (as primary keys).
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?singer
        WHERE
        {
        ?singer arc:singer:Country ?countries.
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_table_grounding("singer"), schema)])
        qdmr = QdmrInstance(["select", "project"], [["countries"], ["singers", "#1"]])
        grounding = { GroundingIndex(0,0,"countries") : GroundingKey.make_column_grounding("singer", "Country"),
            GroundingIndex(1,0,"singers") : GroundingKey.make_table_grounding("singer")
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column_project_value(self):
        """Select column, project value should return the matching value
        entries for the selected rows.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?countries
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        ?singer arc:singer:Country ?countries.
        FILTER(?countries = "France"^^xsd:string)
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_value_grounding("singer", "Country", "France"))])
        qdmr = QdmrInstance(["select", "project"], [["names"], ["France", "#1"]])
        grounding = { GroundingIndex(0,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
            GroundingIndex(1,0,"France") : GroundingKey.make_value_grounding("singer", "Country", "France"),
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_value_project_column(self):
        """Select value, project column should return the column entries
        of the rows matching that value.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        ?singer arc:singer:Country ?countries.
        FILTER(?countries = "France"^^xsd:string)
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name"))])
        qdmr = QdmrInstance(["select", "project"], [["France"], ["names", "#1"]])
        grounding = { GroundingIndex(0,0,"France") : GroundingKey.make_value_grounding("singer", "Country", "France"),
            GroundingIndex(1,0,"names") : GroundingKey.make_column_grounding("singer", "Name")
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)
class TestDifferentColumnOrder(unittest.TestCase):
    """Checks result comparison with require_column_order=False."""

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_project_column_order(self):
        """A union emitting (#2, #1) should match the reference up to
        column order when require_column_order is False.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name ?countries
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        ?singer arc:singer:Country ?countries.
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[
                OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name")),
                OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Country"))
            ])
        qdmr = QdmrInstance(["select", "project", "union"], [["names"], ["Country", "#1"], ["#2", "#1"]])
        grounding = { GroundingIndex(0,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
            GroundingIndex(1,0,"Country") : GroundingKey.make_column_grounding("singer", "Country"),
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=False,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)
class TestSelectFilter(unittest.TestCase):
    """QDMR filter steps: value equality, comparatives, column-to-column
    comparison, and superlatives.
    """

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column_filter_value(self):
        """Select a column, filter rows by a value in another column."""
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        ?singer arc:singer:Country ?countries.
        FILTER(?countries = "France"^^xsd:string)
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name"))])
        qdmr = QdmrInstance(["select", "filter"], [["names"], ["#1", "France"]])
        grounding = { GroundingIndex(0,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
            GroundingIndex(1,1,"France") : GroundingKey.make_value_grounding("singer", "Country", "France")
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_filter_with_comparative(self):
        """Filter with a comparative grounding (Age > 32)."""
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        ?singer arc:singer:Age ?Age.
        FILTER(?Age > 32).
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name"))])
        qdmr = QdmrInstance(["select", "filter"], [["names"], ["#1", "older than 32"]])
        grounding = {GroundingIndex(0,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
            GroundingIndex(1,1,"older than 32"): GroundingKey.make_comparative_grounding(">", "32", GroundingKey.make_column_grounding("singer", "Age")),
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column_filter_with_value_in_another_column(self):
        """Filter where the comparative's argument references another
        QDMR step (#2), i.e. comparing two columns of the same row.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "car_1")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?ID
        WHERE
        {
        ?ID arc:cars_data:Weight ?Weight.
        ?ID arc:cars_data:Year ?Year.
        FILTER(?Year > ?Weight).
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_table_grounding("cars_data"), schema)])
        qdmr = QdmrInstance(["select", "project", "filter"], [["cars"], ["weights", "#1"], ["#1", "years larger than #2"]])
        grounding = {
            GroundingIndex(0,0,"cars") : GroundingKey.make_table_grounding("cars_data"),
            GroundingIndex(1,0,"weights") : GroundingKey.make_column_grounding("cars_data", "Weight"),
            GroundingIndex(2,1,"years larger than #2"): GroundingKey.make_comparative_grounding(">", "#2", GroundingKey.make_column_grounding("cars_data", "Year")),
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)

    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column_filter_superlative(self):
        """Filter with a superlative grounding (min over a column),
        expressed in SPARQL as a nested aggregate subquery.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name_1
        WHERE
        {
        {
        SELECT (min(?Age) AS ?min)
        WHERE
        {
        ?singer arc:singer:Age ?Age.
        }
        }
        ?singer_1 arc:singer:Age ?Age_1.
        ?singer_1 arc:singer:Name ?Name_1.
        FILTER(?Age_1 = ?min).
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name"))])
        qdmr = QdmrInstance(["select", "filter"], [["names"], ["#1", "the youngest"]])
        grounding = { GroundingIndex(0,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
            GroundingIndex(1,1,"the youngest") : GroundingKey.make_comparative_grounding("min", None, GroundingKey.make_column_grounding("singer", "Age")),
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)
class TestSelectProjectComparative(unittest.TestCase):
    @timeout(ONE_TEST_TIMEOUT)
    def test_select_column_project_another_column_compare_value(self):
        """Comparative step (!=) over a projected column filters the
        originally selected column.
        """
        rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
        correct_sparql_query = textwrap.dedent("""\
        SELECT ?Name
        WHERE
        {
        ?singer arc:singer:Name ?Name.
        ?singer arc:singer:Country ?countries.
        FILTER(?countries != "France"^^xsd:string)
        }""")
        correct_sparql_query = QueryToRdf(query=correct_sparql_query,
            output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Name"))])
        qdmr = QdmrInstance(["select", "project", "comparative"], [["names"], ["countries", "#1"], ["#1", "#2", "not from France"]])
        grounding = { GroundingIndex(1,0,"countries") : GroundingKey.make_column_grounding("singer", "Country"),
            GroundingIndex(0,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
            GroundingIndex(2,2,"not from France"): GroundingKey.make_comparative_grounding("!=", "France"),
        }
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        equal, message = result.is_equal_to(result_correct,
            require_column_order=True,
            require_row_order=False,
            return_message=True)
        self.assertTrue(equal, message)
@timeout(ONE_TEST_TIMEOUT)
def test_select_column_project_compare_with_another_column(self):
"""Select table, filter values based on a value in another column based on project-comparative
"""
rdf_graph, schema = get_graph_and_schema("dev", "concert_singer")
correct_sparql_query = textwrap.dedent("""\
SELECT ?Country
WHERE
{
?singer arc:singer:Country ?Country.
?singer arc:singer:Name ?Name.
?singer arc:singer:Age ?Age.
FILTER(?Age > 32).
}
GROUP BY ?Country""")
correct_sparql_query = QueryToRdf(query=correct_sparql_query,
output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding("singer", "Country"))])
qdmr = QdmrInstance(["select", "project", "comparative"], [["countries"], ["names", "#1"], ["#1", "#2", "older than 32"]])
grounding = { GroundingIndex(0,0,"countries") : GroundingKey.make_column_grounding("singer", "Country"),
GroundingIndex(1,0,"names") : GroundingKey.make_column_grounding("singer", "Name"),
GroundingIndex(2,2,"older than 32"): GroundingKey.make_comparative_grounding(">", "32", GroundingKey.make_column_grounding("singer", "Age")),
"distinct": ["#1"],
}
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, | |
<gh_stars>1-10
#!/usr/bin/env python
from bearlibterminal import terminal as blt
import os
import sys
import math
from random import choice, randrange, random
from collections import defaultdict
from textwrap import wrap
from time import sleep
import string
import shelve
from copy import copy #, deepcopy
from enum import Enum, auto
"""
HoS
"""
HEIGHT = 16     # board height in cells (also the y offset for puts2 status area)
WIDTH = 38      # board width in cells
AUTO_BATTLE = 11
END_MOVE=900
SIZE = 24       # presumably the terminal font/tile size — confirm against blt setup
SLP = 0.01      # default animation delay in seconds
SEQ_TYPES = (list, tuple)
# NOTE: left open for the whole program run; print() is routed here (see below).
debug_log = open('debug', 'w')
board_grid = []         # world map: 2D grid of board names, filled by board_setup()
castle_boards = {}
ai_heroes = []
ai_buildings = []
player_heroes = []
player_buildings = []
players = []
castles = []            # castle IDs, resolved via Objects (see manage_castles)
# Translation table from bearlibterminal key codes to the one-character /
# named strings used by the game's input handling (see parsekey()).
keymap = dict(
    [
        [ blt.TK_ESCAPE, 'ESCAPE' ],
        [ blt.TK_RETURN, 'ENTER' ],
        [ blt.TK_PERIOD, "." ],
        [ blt.TK_SHIFT, 'SHIFT' ],
        [ blt.TK_UP, "UP" ],
        [ blt.TK_DOWN, "DOWN" ],
        [ blt.TK_RIGHT, "RIGHT" ],
        [ blt.TK_LEFT, "LEFT" ],
        [ blt.TK_MOUSE_LEFT, "CLICK" ],
        [ blt.TK_Q, 'q' ],
        [ blt.TK_W, 'w' ],
        [ blt.TK_E, 'e' ],
        [ blt.TK_R, 'r' ],
        [ blt.TK_T, 't' ],
        [ blt.TK_Y, 'y' ],
        [ blt.TK_U, 'u' ],
        [ blt.TK_I, 'i' ],
        [ blt.TK_O, 'o' ],
        [ blt.TK_P, 'p' ],
        [ blt.TK_A, 'a' ],
        [ blt.TK_S, 's' ],
        [ blt.TK_D, 'd' ],
        [ blt.TK_F, 'f' ],
        [ blt.TK_G, 'g' ],
        [ blt.TK_H, 'h' ],
        [ blt.TK_J, 'j' ],
        [ blt.TK_K, 'k' ],
        [ blt.TK_L, 'l' ],
        [ blt.TK_Z, 'z' ],
        [ blt.TK_X, 'x' ],
        [ blt.TK_C, 'c' ],
        [ blt.TK_V, 'v' ],
        [ blt.TK_B, 'b' ],
        [ blt.TK_N, 'n' ],
        [ blt.TK_M, 'm' ],
        [ blt.TK_1, '1' ],
        [ blt.TK_2, '2' ],
        [ blt.TK_3, '3' ],
        [ blt.TK_4, '4' ],
        [ blt.TK_5, '5' ],
        [ blt.TK_6, '6' ],
        [ blt.TK_7, '7' ],
        [ blt.TK_8, '8' ],
        [ blt.TK_9, '9' ],
        [ blt.TK_0, '0' ],
        [ blt.TK_COMMA, ',' ],
        [ blt.TK_SPACE, ' ' ],
        [ blt.TK_MINUS, '-' ],
        [ blt.TK_EQUALS, '=' ],
        [ blt.TK_SLASH, '/' ],
    ]
)
noto_tiles = """fountain
sailboat
snowman
snowflake
water
tree1
tree2
palm
cactus
flower1
flower2
flower3
flower4
flower5
leaf1
leaf2
leaf3
guitar
trumpet
monkey
elephant
soldier
card
chair
PC
sharp-rock1
sharp-rock2
book1
book2
book3
book4
book5
key
seal
truck
ship
flag
man
girl
door
bull
cow
""".split()
noto_tiles = {k: 0xe300+n for n,k in enumerate(noto_tiles)}
class ObjectsClass:
    """Registry of live game objects keyed by ID enum members.

    String keys (item or attribute access, e.g. ``Objects.hero1`` or
    ``Objects['hero1']``) are resolved through the ID enum before the
    underlying dict is consulted; the ``*_by_id`` methods take ID members
    directly.
    """

    def __init__(self):
        self.objects = {}

    def __setitem__(self, k, v):
        self.objects[getattr(ID, k)] = v

    def __getitem__(self, k):
        return self.objects[getattr(ID, k)]

    def __getattr__(self, k):
        # Only called for names not found normally, so 'objects' itself
        # never recurses here.
        return self.objects[getattr(ID, k)]

    def get(self, k, default=None):
        """dict.get-style lookup by name.

        Bug fix: `default` was previously accepted but ignored (the
        lookup always fell back to None); it is now honoured.
        """
        key_id = getattr(ID, k, None)
        return self.objects.get(key_id, default)

    def get_by_id(self, id):
        return self.objects.get(id)

    def set_by_id(self, id, v):
        self.objects[id] = v
# Global singleton registry used throughout the module.
Objects = ObjectsClass()
class ID(Enum):
    """Identifiers for unique game entities and resource kinds.

    Values come from auto(), so code should rely on member identity,
    never on the numeric value.
    """
    castle1 = auto()
    castle2 = auto()
    castle3 = auto()
    castle4 = auto()
    castle5 = auto()
    hero1 = auto()
    hero2 = auto()
    hero3 = auto()
    hero4 = auto()
    hero5 = auto()
    hero6 = auto()
    hero7 = auto()
    hero8 = auto()
    hero9 = auto()
    hero10 = auto()
    power_bolt = auto()
    shield_spell = auto()
    gold = auto()
    wood = auto()
    ore = auto()
    mercury = auto()
    sulphur = auto()
    raised_platform = auto()
# Display names for the first four heroes; hero5-hero10 have no name here.
hero_names = {ID.hero1:'Arcachon', ID.hero2:'Carcassonne', ID.hero3:'Troyes', ID.hero4:'Sault'}
class Player:
    """A human or AI participant, owning heroes and castles by ID."""

    # Default starting stock. Each instance gets its own copy in
    # __init__: previously every Player aliased this single class-level
    # dict, so spending resources for one player mutated all players.
    resources = {
        ID.gold: 3000,
        ID.wood: 20,
        ID.ore: 20,
        ID.mercury: 0,
        ID.sulphur: 0,
    }

    def __init__(self, name, is_ai, color):
        self.name, self.is_ai, self.color = name, is_ai, color
        self.is_human = not is_ai
        # Per-player copy of the starting resources (bug fix: was shared).
        self.resources = dict(type(self).resources)
        self._heroes = []    # hero IDs, resolved via the Objects registry
        self._castles = []   # castle IDs, resolved via the Objects registry

    def __repr__(self):
        return f'<P: {self.name}>'

    @property
    def heroes(self):
        """Generator over this player's living heroes."""
        resolved = (Objects.get_by_id(hero_id) for hero_id in self._heroes)
        return (hero for hero in resolved if hero.alive)

    @property
    def castles(self):
        """Generator over this player's castles."""
        return (Objects.get_by_id(castle_id) for castle_id in self._castles)
class Type(Enum):
    """Categories of game entities: terrain features, unit kinds,
    attack modes, and structures.
    """
    door1 = auto()
    container = auto()
    blocking = auto()
    gate = auto()
    castle = auto()
    peasant = auto()
    pikeman = auto()
    cavalier = auto()
    archer = auto()
    griffin = auto()
    centaur = auto()
    melee_attack = auto()
    magic_attack = auto()
    hero = auto()
    building = auto()
    raised_platform = auto()
class Blocks:
    """All game tiles (single-character glyphs; low/private codepoints are
    presumably remapped by the custom font — confirm against blt setup).
    """
    # buildings
    jousting_ground = '\u25ed'
    archers_tower = '\u0080'
    griffin_tower = '\u0081'
    centaur_stables = '\u0082'
    # unit sprites, left/right facing variants
    cavalier_l = '\u0007'
    cavalier_r = '\u0008'
    archer_r = '\u000c'
    archer_l = '\u000b'
    griffin_r = '\u000e'
    griffin_l = '\u000f'
    centaur_l = '\u0011'
    centaur_r = '\u0012'
    arrow_r = '\u20e9'
    arrow_l = '\u20ea'
    gold = '\u009e'
    sawmill = '\u0239'
    large_circle = '\u20dd'
    shield_spell = '\u0709'
    button_platform = '\u20e3'
    bricks = '\u2687'
    # terrain
    blank = '.'
    rock = '█'
    door = '⌸'
    water = '≋'
    tree1 = '\u2689'
    tree2 = '\u268a'
    rock2 = '▅'
    rock3 = '░'
    peasant = '\u23f2'
    pikeman = '\u23f3'
    hero1_l = '\u0003'
    hero1_r = '\u0004'
    rubbish = '⛁'
    cursor = '𐌏'
    hut = '△'
    guardhouse = '⌂'
    # subscript digits (two styles), used for small numeric overlays
    sub_plus = '\u208a'
    sub = [
        '\u2080',
        '₁',
        '₂',
        '₃',
        '₄',
        '₅',
        '₆',
        '₇',
        '₈',
        '₉'
    ]
    sub2 = [
        '\u20bb',
        '\u20bc',
        '\u20bd',
        '\u20be',
        '\u20bf',
        '\u20c0',
        '\u20c1',
        '\u20c2',
        '\u20c3',
        '\u20c4',
    ]
    list_select = '▶'
    hero_select = '\u2017'
    # spells
    bolt1 = '\u16ca'
    bolt2 = '\u16cb'
    spell_select = '\u229b'
# Things that block movement. NOTE: this mixes a raw glyph string
# (Blocks.rock) with Type enum members, so membership tests must handle both.
BLOCKING = [Blocks.rock, Type.door1, Type.blocking, Type.gate, Type.castle]
class Misc:
    """Mutable global game state, used as a plain namespace (never
    instantiated).
    """
    day = 1
    week = 1
    status = []            # pending status-bar messages
    current_unit = None
    player = None          # player whose turn it is
    hero = None            # currently selected hero
def mkcell():
    """Create a new board cell: a stack of tiles, initially just blank."""
    return [Blocks.blank]
def mkrow():
    """Create a new board row of WIDTH fresh cells."""
    return [mkcell() for _ in range(WIDTH)]
def first(x):
    """Return the first element of iterable `x`, or None when empty.

    Uses next() so the input is not materialized — the previous
    implementation built a tuple of the entire iterable just to read
    element 0.
    """
    return next(iter(x), None)
def last(x):
    """Return the final element of iterable `x`, or None when empty."""
    items = tuple(x)
    return items[-1] if items else None
def chk_oob(loc, x=0, y=0):
    """True when `loc` shifted by (x, y) stays on the local board."""
    in_bounds, _, _ = _chk_oob(loc, x, y)
    return in_bounds
def _chk_oob(loc, x=0, y=0):
    """Bounds-check `loc` shifted by (x, y) against the local board.

    Returns (both_ok, x_ok, y_ok). Despite the name, True means the
    coordinate IS inside the board.
    """
    new_x, new_y = loc.x + x, loc.y + y
    x_ok = 0 <= new_x <= WIDTH - 1
    y_ok = 0 <= new_y <= HEIGHT - 1
    return x_ok and y_ok, x_ok, y_ok
def chk_b_oob(loc, x=0, y=0):
    """True when `loc` shifted by (x, y) stays inside the world board grid."""
    height = len(board_grid)
    width = len(board_grid[0])
    new_x = loc.x + x
    new_y = loc.y + y
    return 0 <= new_y <= height - 1 and 0 <= new_x <= width - 1
def debug(*args):
    """Append *args* to the debug log file and flush immediately."""
    debug_log.write(f"{args}\n")
    debug_log.flush()
print=debug
def blt_put_obj(obj, loc=None):
    """Redraw `obj` at `loc` (defaults to obj.loc) and refresh the screen."""
    x, y = loc or obj.loc
    # Cells are two terminal columns wide; odd rows are shifted one column
    # to fake a staggered/hex layout.
    screen_x = x * 2 + (y % 2)
    blt.clear_area(screen_x, y, 1, 1)
    puts(screen_x, y, obj)
    refresh()
def pad_none(lst, size):
    """Return a copy of `lst` right-padded with None up to length `size`."""
    padding = [None] * (size - len(lst))
    return lst + padding
def dist(a, b):
    """Distance between two locations or objects carrying a `.loc`.

    The extra dx*dy cross term skews plain Euclidean distance —
    presumably to compensate for the staggered grid; confirm.
    """
    loc_a = getattr(a, 'loc', a)
    loc_b = getattr(b, 'loc', b)
    dx = loc_a.x - loc_b.x
    dy = loc_a.y - loc_b.y
    return math.sqrt(dx**2 + dy**2 + dx*dy)
def getitem(it, ind=0, default=None):
    """Return `it[ind]`, or `default` when the index is out of range."""
    try:
        return it[ind]
    except IndexError:
        return default
def puts(x, y, text):
    # Draw in the map area (top part of the window).
    _puts(x, y, text)
def puts2(x, y, text):
    # Draw in the status area: same call, offset below the map by HEIGHT rows.
    _puts(x, y+HEIGHT, text)
def _puts(x,y,a):
    """Low-level draw of `a` (a str or a game object) at terminal cell (x, y).

    Objects may expose `_str()` yielding several combining glyphs, and a
    `color` attribute used to wrap output in bearlibterminal color tags.
    NOTE(review): assumes every non-str drawable has `_str` and `color`
    attributes; an object without them raises AttributeError here — confirm.
    """
    if isinstance(a,str):
        blt.puts(x,y,a)
    elif a._str:
        # combined glyphs: draw each one at the same cell so they stack
        for _s in a._str():
            if a.color:
                _s = f'[color={a.color}]{_s}[/color]'
            blt.puts(x,y,_s)
    else:
        if a.color:
            a = f'[color={a.color}]{a}[/color]'
        blt.puts(x,y,str(a))
def refresh():
    # Flush all buffered draw calls to the terminal window.
    blt.refresh()
def get_and_parse_key():
    """Block until a usable key arrives, skipping bare SHIFT presses."""
    while True:
        key = parsekey(blt.read())
        if key != 'SHIFT':
            return key
def parsekey(k):
    """Translate a raw bearlibterminal event into a one-character command.

    Printable characters are looked up in `keymap`; with SHIFT held they are
    uppercased and shifted punctuation is mapped to the symbols the game
    binds ('_', '?', '+').
    """
    b=blt
    valid = (b.TK_RETURN, b.TK_SHIFT, b.TK_ESCAPE, b.TK_UP, b.TK_DOWN, b.TK_RIGHT, b.TK_LEFT, b.TK_MOUSE_LEFT)
    # NOTE(review): precedence here is (k and wchar-check) or (k in valid) —
    # presumably intended; confirm.
    if k and blt.check(blt.TK_WCHAR) or k in valid:
        k = keymap.get(k)
        if k and blt.state(blt.TK_SHIFT):
            k = k.upper()
            # Shift pairs on a US layout: '-'->'_', '/'->'?', '='->'+'.
            if k=='-': k = '_'
            if k=='/': k = '?'
            if k=='=': k = '+'
    return k
def get_mouse_pos():
    # Current mouse location in terminal-cell coordinates.
    return blt.state(blt.TK_MOUSE_X), blt.state(blt.TK_MOUSE_Y)
def board_setup():
    """Create the four world boards, run their builders, link the world grid."""
    specs = (
        ('b_1', '1', Loc(0, 0), 'board_1'),
        ('b_2', '2', Loc(1, 0), 'board_2'),
        ('b_3', '3', Loc(0, 1), 'board_3'),
        ('b_4', '4', Loc(1, 1), 'board_4'),
    )
    for attr, board_name, loc, builder in specs:
        board = Board(loc, board_name)
        setattr(Boards, attr, board)
        getattr(board, builder)()
    board_grid[:] = [
        ['1', '2', None],
        ['3', '4', None],
    ]
    # Start on the top-left board.
    Misc.B = Boards.b_1
def manage_castles():
    """Show a lettered menu of the player's castles and open the chosen one."""
    l = [Objects.get_by_id(id) for id in castles]
    p_castles = [c for c in l if c.player==Misc.player]
    if not p_castles:
        Misc.hero.talk(Misc.hero, 'You have no castles!')
        return
    x, y = 5, 1
    ascii_letters = string.ascii_letters
    lst = []
    # One menu row per castle: " a) <name>", " b) <name>", ...
    for n, c in enumerate(p_castles):
        lst.append(f' {ascii_letters[n]}) {c.name}')
    # Clear a rectangle wide enough for the longest entry, then draw the menu.
    w = max(len(l) for l in lst)
    blt.clear_area(x, y, w+2, len(lst))
    for n, l in enumerate(lst):
        puts(x, y+n, l)
    refresh()
    ch = get_and_parse_key()
    if ch and ch in ascii_letters:
        try:
            castle = p_castles[string.ascii_letters.index(ch)]
        except IndexError:
            # A letter past the end of the menu was pressed; ignore it.
            return
        castle.town_ui()
def stats(castle=None, battle=False):
    """Redraw the status bar: resources, movement, armies and the date.

    NOTE(review): if there is no hero and no current battle unit,
    `move`/`speed` are never bound and the f-string below raises NameError —
    presumably that state cannot occur; confirm.
    """
    pl = Misc.player
    if not pl: return
    h = Misc.hero
    if battle and Misc.current_unit:
        u = Misc.current_unit
        move, speed = u.cur_move, u.speed
    elif h:
        move, speed = h.cur_move, h.speed
    # Resource readout like [Gold:100][Wood:5]...
    res = pl.resources
    s=''
    for r in 'gold wood ore mercury sulphur'.split():
        id = getattr(ID, r)
        s+= f'[{r.title()}:{res.get(id)}]'
    st = s + f' | Move {move}/{speed} | {Misc.B._map}'
    x = len(st)+2
    puts2(1,0,blt_esc(st))
    puts2(x,0, h.name)
    x+= len(h.name) + 2
    y = 1
    if castle:
        # Castle garrison on its own row; empty slots render as "[ ]".
        x = 1
        for a in castle.army:
            puts2(x+1 if a else x,
                  y,
                  a or blt_esc('[ ]')
                  )
            x+=3
        y+=1
    # Hero's army row, then the week/day stamp after it.
    x = 1
    for a in h.army:
        puts2(x+1 if a else x,
              y,
              a or blt_esc('[ ]'))
        x+=3
    puts2(x+2, y, f'w{Misc.week}/d{Misc.day}')
    refresh()
def status(msg):
    # Queue a message for the status line (drawn elsewhere).
    Misc.status.append(msg)
def blt_esc(txt):
    """Escape literal square brackets so bearlibterminal won't parse them as tags."""
    escaped = txt.replace('[', '[[')
    return escaped.replace(']', ']]')
def prompt():
    """Read a line of input on the status bar; returns it when ENTER is hit."""
    mp = ''
    status('> ')
    blt.refresh()
    while 1:
        k = get_and_parse_key()
        if not k: continue
        if k=='ENTER':
            return mp
        # Every other parsed key is appended verbatim (no backspace support).
        mp += k
        status('> '+mp)
        blt.refresh()
class Loc:
    """A mutable 2-D coordinate that unpacks, indexes, hashes and compares."""
    def __init__(self, x, y):
        self.y, self.x = y, x
def __iter__(self):
yield self.x
yield self.y
def __getitem__(self, i):
return (self.x, self.y)[i]
def __repr__(self):
return str((self.x, self.y))
def __eq__(self, l):
if isinstance(l, Loc):
return (self.x, self.y) == (l.x, l.y)
def __hash__(self):
return hash(tuple(self))
def mod(self, x=0, y=0):
new = copy(self)
new.y += y
new.x += x
return new
def | |
الأساسي',
'Group Title': 'عنوان المجموعة',
'Group Type': 'نوع المجموعة',
'Group Types': 'أنواع المجموعة',
'Group updated': 'تجديد المجموعة',
'Grouped by': 'مجمعة حسب',
'Groups': 'المجموعات',
'Groups removed': 'تمت إزالة المجموعات',
'Hair Color': 'لون الشعر',
'Hair Comments': 'الشعر تعليقات',
'Hair Length': 'طول الشعر',
'Hair Style': 'أسلوب الشعر',
'Hand Washing Facilities': 'مرافق غسل اليدين',
'Has additional rights to modify records relating to this Organization or Site.': 'لديه حقوق إضافية لتعديل السجلات المتعلقة بهذه المنظمة أو الموقع.',
'Has the Certificate for receipt of the shipment been given to the sender?': 'هل لديه شهادة مسلمة للمرسِل لاستلام الشحنة؟',
'Has the GRN (Goods Received Note) been completed?': 'هل تم الانتهاء من تسجيل السلع المستلمة ؟',
'Hazard': 'مخاطر',
'Hazard added': 'تم إضافة المخاطر',
'Hazard added to Project': 'المخاطر المضافة إلى المشروع',
'Hazard deleted': 'تم حذف الخطر',
'Hazard Details': 'تفاصيل الخطر',
'Hazard removed from Project': 'الخطر تم إزالتها من مشروع',
'Hazard updated': 'الخطر تم تحديثها',
'Hazards': 'المخاطر',
'Head of Household': 'رب الأسرة',
'Head of Household Date of Birth': 'تاريخ ميلاد رب الاسرة',
'Head of Household Gender': 'جنس رب الاسرة',
'Head of Household Name': '<NAME>',
'Head of Household Relationship': 'العلاقة مع رب الاسرة',
'Headquarters': 'المقر الرئيسي',
'Health': 'الصحة',
'Health & Health Facilities': 'المرافق الصحية والصحة',
'Health Awareness, Promotion': 'الترويج و الوعي الصحي',
'Health care assistance, Rank': 'دعم الرعاية الصحية،مرتبة ',
'Health center': 'مركز صحي',
'Health center without beds': 'مركز صحي بدون أسرة',
'Health Facilities - Construction and Operation': 'المرافق الصحية - البناء والتشغيل',
'Health Insurance': 'تأمين صحي',
'Health Policy, Strategy Development': 'سياسة الصحة والتنمية الاستراتيجية',
'Heat Wave': 'موجة حر شديد',
'Height': 'الإرتفاع',
'Height (cm)': 'الإرتفاع (سم)',
'Height (m)': 'الإرتفاع (م)',
'Help': 'مساعدة',
'here': 'هنا',
'HFA': 'إطار عمل هيوغو',
'HFA Priorities': 'الأولويات إطار عمل هيوغو',
'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'HFA1: تأكد من أن الحد من مخاطر الكوارث هو أولوية وطنية ومحلية قائمة على قاعدة مؤسسية صلبة للتنفيذ.',
'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'HFA2: تحديد وتقييم ورصد مخاطر الكوارث وتعزيز الإنذار المبكر.',
'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'HFA3: استخدام المعرفة والابتكار والتعليم لبناء ثقافة للسلامة والتأقلم على جميع المستويات.',
'HFA4: Reduce the underlying risk factors.': 'HFA4: تقليل عوامل الخطر الأساسية.',
'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'HFA5: تعزيز التأهب للكوارث للاستجابة الفعالة على جميع المستويات.',
'Hide': 'إخفاء',
'Hide Details': 'أخف التفاصيل',
'Hide Form': 'إخفاء نموذج',
'Hide Map': 'إخفاء الخريطة',
'Hide Table': 'إخفاء الجدول',
'Hierarchy Level 1 Name (e.g. State or Province)': 'إسم المستوى 1 من التسلسل الهرمي (مثال : ناحية أو ولاية)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'اسم التسلسل الهرمي للمستوى 3 ا(مثل المدينة / البلدة / القرية)',
'High': 'عالية',
'Highest Priority Open Requests': 'أعلى أولوية الطلبات المفتوحة',
'Hindu': 'الهندوسي',
'Hit the back button on your browser to try again.': 'اضغط زر الرجوع في المتصفح الخاص بك لإعادة المحاولة.',
'Home': 'الصفحة الرئيسية',
'Home Address': 'عنوان المنزل',
'Home Country': 'الوطن',
'Home Crime': 'جريمة عائلية',
'Home Phone': 'هاتف المنزل',
'Home Town': 'مسقط رأس',
'Homeless': 'بلا مأوى',
'Homepage': 'الصفحة الرئيسية',
'Hospital': 'المستشفى',
'Hospital Details': 'تفاصيل المستشفى',
'Hospital information added': 'تمت اضافة المعلومات الخاصة بالمستشفى',
'Hospitals': 'مستشفيات',
'Host National Society': 'الجمعية الوطنية المضيف',
'Hot Spot': 'هوت سبوت',
'Hour': 'ساعة',
'Hourly': 'ساعيا',
'hourly': 'كل ساعة',
'Hours': 'الساعات',
'hours': 'ساعات',
'Hours added': 'الساعات تم اضافتها',
'Hours deleted': 'ساعات تم حذفها',
'Hours Details': 'تفاصيل ساعات',
'Hours Model': 'نموذج الساعات',
'Hours updated': 'الساعات تم تحديثها',
'House Design': 'تصميم المنزل',
'Household': 'الاسرة',
'Household kits received': 'أطقم المعدات المنزلية الواردة',
'Household Members': 'أفراد الأسرة',
'Households': 'الأسر',
'Households below poverty line': 'الاسر تحت خط الفقر',
'Housing Repair and Retrofitting ': 'إصلاح المساكن والتعديل التحديثي',
'Housing Types': 'نوع السكن',
'How is this person affected by the disaster? (Select all that apply)': 'كيف تضرر هذا الشخص من جراء الكارثة؟ (اختر كل ما ينطبق عليه)',
'How long will the food last?': 'كم من الوقت سوف يستمر الطعام؟',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'كم عدد الفتيان (0-17 عاما) المفقودين بسبب الأزمة',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'كم عدد الفتيات (0-17 سنوات) اللائي توفين بسبب الأزمة',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'كم عدد الفتيات (0-17 عاما)اللواتي أُصبن بسبب الأزمة',
'How many Men (18 yrs+) are Injured due to the crisis': 'كم عدد الرجال المصابين(+18 عاما ) بسبب الأزمة',
'How many Men (18 yrs+) are Missing due to the crisis': 'كم عدد الرجال ( + 18 عاما) مفقود بسبب الأزمة',
'How many new cases have been admitted to this facility in the past 24h?': 'كم عدد الحالات الجديدة التي تم نقلها لهذا المرفق في 24ساعة الماضية؟',
'How many Women (18 yrs+) are Missing due to the crisis': 'كم عدد النساء (+18عاما) المفقودات بسبب الأزمة',
'Human Resource': 'الموارد البشرية',
'Human Resource added': 'تم إضافة الموارد البشرية',
'Human Resource assigned': 'تعيين الموارد البشرية',
'Human Resource Assignment updated': ' تم تحديث تعيين الموارد البشرية',
'Human Resource Details': 'تفاصيل الموارد البشرية',
'Human Resource Development': 'تطوير الموارد البشرية',
'Human Resource Management': 'إدارة الموارد البشرية',
'Human Resource unassigned': 'الموارد البشرية غير المعينة',
'Human Resources': 'الموارد البشرية',
'Humanitarian Diplomacy': 'الدبلوماسية الإنسانية',
'Humanitarian Monitoring': 'الرصد الإنساني',
'Hurricane': 'اعصار',
'Hygiene': 'النظافة',
'Hygiene NFIs': 'مواد النظافة',
'Hygiene practice': 'ممارسة النظافة',
'Hygiene Promotion': 'تعزيز النظافة',
'I am available in the following area(s)': 'انا متوفر في المجالات التالية',
'ID': 'الهوية الشخصية',
'ID Card': 'بطاقة التعريف',
'ID Card Number': 'ID رقم البطاقة',
'ID Number': 'رقم الهوية',
'ID Tag Number': 'رقم البطاقة التعريفية',
'ID Type': 'نوع ID',
'Identities': 'الهويات',
'Identity': 'هوية',
'Identity added': 'تم إضافة الهوية',
'Identity deleted': 'تم حذف الهوية',
'Identity Details': 'تفاصيل الهوية',
'Identity updated': 'تم تحديث الهوية',
'IEC Materials': 'المواد IEC',
'If it is a URL leading to HTML, then this will downloaded.': 'إذا كان هذا رابطا يؤدي إلى HTML اذن سيتم التحميل.',
'If neither are defined, then the Default Marker is used.': 'إذا لم يتم تحديد أي أحد ،ستستخدم علامة افتراضية.',
'If no marker defined then the system default marker is used': 'إذا لم يكن هناك علامة محددة سوف يتم استخدام العلامة الافتراضية للنظام',
'If not the Head of Household': 'إن لم يكن رب الأسرة',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'اذا تم الاختيار,فسيتم تحديث موقع الضبط حيثما وجد موقع الشخص.',
'If the organization is a lead for this sector.': 'إذا كانت المنظمة هي الرائدة في هذا القطاع.',
'If the person counts as essential staff when evacuating all non-essential staff.': 'إذا بحساب الشخص الموظفين الأساسيين عندما إجلاء جميع موظفيها غير الاساسيين.',
'If the request type is "Other", please enter request details here.': 'إذا كان نوع الطلب "أخر"، إذا سمحت أدخل تفاصيل الطلب هنا.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'إذا كان هذا التكوين يمثل منطقة لقائمة المناطق ، أعطه اسما لاستخدامه في القائمة. سيكون اسم التكوين الشخصي للخريطة موافقا لاسم المستخدم.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'في حالة تعبئة هذا الحقل ثم المستخدم الذي يحدد هذه المنظمة عند الاشتراك سيتم تعيين بمثابة أركان هذه المنظمة إلا إذا لم تطابق المجال الخاص بها حقل المجال.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'في حالة تعبئة هذا الحقل ثم مستخدم مع المجال سيتم تلقائيا تعيين بمثابة أركان هذه المنظمة المحددة',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'إذا تم وضع العلامة، سيصبح هذا موقع قاعدة المستخدم حيث ظهرعلى الخريطة',
'If this record should be restricted then select which role is required to access the record here.': 'إذا إستلزم تقييد هذا التسجيل فاختر الدور المناسب للدخول في التسجيل هنا.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'إذا ينبغي أن يقتصر هذا السجل ثم حدد الدور الذي (ق) يسمح للوصول إلى سجل هنا.',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'إذا لم تدخل وثيقة مرجعية ، سيتم عرض البريد الإلكتروني الخاص بك للتحقق من هذه البيانات.',
"If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": 'إذا كنت لا ترى النشاط في القائمة، يمكنك إضافة واحدة جديدة بالنقر على رابط \'إنشاء آخر ".',
"If you don't see the beneficiary in the list, you can add a | |
from re import findall
from math import sin, cos, tan
from colorama import init, Style, Fore
from MineScriptVisitor import MineScriptVisitor
from MineScriptParser import MineScriptParser
import tags
# Whitelist of list-method names MineScript programs may call on list values.
# NOTE(review): enforcement happens elsewhere in the visitor — confirm before
# extending this list.
approved_attrs = [
    "append",
    "pop",
    "remove",
    "sort"
]
# %-style templates for interpreter diagnostics.  `error` is the common
# header (filled with filename, line number, offending source line) and
# `end` resets the colorama styling afterwards.
error = f"{Fore.RED}Traceback:\nFile %s, line %i\n%s\n"
end = f"{Style.RESET_ALL}"
typeerror1 = "TypeError: Variable '%s' should be of type int or float, not %s"
typeerror2 = "TypeError: Object of type %s has no length"
typeerror3 = "TypeError: Indices should be of type int, not %s"
typeerror4 = "TypeError: Object of type %s is not callable"
typeerror5 = "TypeError: Unsupported operand types for %s: '%s' and '%s'"
typeerror6 = "TypeError: '%s' not supported between instances of '%s' and '%s'"
typeerror7 = "TypeError: float() argument must be a string or a number, not '%s'"
typeerror8 = "TypeError: Function argument must be a string or a number, not '%s'"
typeerror9 = "TypeError: Invalid type"
typeerror10 = "TypeError: Function %s() requires %s arguments, but %s were given"
nameerror = "NameError: Variable '%s' is not defined"
syntaxerror1 = "SyntaxError: invalid syntax"
syntaxerror2 = "SyntaxError: 'return' outside of function"
indexerror = "IndexError: List index %i is out of range"
attributeerror = "AttributeError: Object of type %s has no attribute %s"
valueerror1 = "ValueError: Could not convert to float: '%s'"
valueerror2 = "ValueError: Could not convert to int: '%s'"
valueerror3 = "ValueError: Selector not formatted correctly"
def add_to_selector(selector, args):
    """Merge extra target arguments into a Minecraft selector string.

    `selector` looks like "@a" or "@a[tag=x]"; every entry of `args` not
    already present is appended, e.g.
    add_to_selector("@a[tag=x]", ["limit=1"]) -> "@a[tag=x,limit=1]".
    """
    if len(selector) < 3:
        # Bare "@x" selector: give it an empty argument list to extend.
        selector = selector + "[]"
    tp = selector[:2]
    # Raw string: "\[" in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Pythons).
    attributes = findall(r"\[([^]]+)\]", selector)
    if attributes:
        attributes = attributes[0].split(",")
    attributes = [attr.strip() for attr in attributes]
    for arg in args:
        if arg not in attributes:
            attributes.append(arg)
    return tp + "[" + ','.join(attributes) + "]"
class Visitor(MineScriptVisitor):
    def __init__(self, name, code, file):
        """Build a parse-tree visitor that compiles MineScript to datapack commands.

        name: datapack name, also used as the function namespace
        code: the full source text (for error reporting)
        file: the source filename (for error reporting)
        """
        self.code = code  # The actual code
        self.file = file  # Filename
        self.datapack_name = name  # Name of the datapack
        self._commands = []  # List of commands to be added to the current function
        self.warnings = []  # List of warnings
        self.memory = {}  # Stores variables
        self.localmemory = {}  # Stores local variables
        self.functionargs = {}  # Stores the args a function takes
        self.func = None  # Current function
        self.r_value = None  # Stores the return variable of the current function
        self.igmemory = []  # Stores the in-game variable names
        self.igfunctionargs = {}  # Stores the args an igfunction takes
        self.igfunctionreturn = {}  # Stores the name of the return variable of a function
        self.igfunctions = {}  # Stores the functions to be turned into .mcfunction files
        self.igfunc = None  # Current igfunction
        self.vars = []  # Stores all the temporary variables currently in use
        self.nreusable = []  # Stores all non reusable variables
        self.igloops = {}  # Stores the loops to be turned into .mcfunction files
        self.loop = []  # Keeps track of loops
        self.prefixes = []  # Keeps track of if/else and execute statements
        self.loops = 0  # Loop ID
        self.tag = 0  # Tag ID
        self.get_tags()  # Get all tags from file
        # Expose the datapack name to MineScript programs as `dpname`.
        self.memory["dpname"] = name
def add_var(self, name): # Add a new in-game variable
if name not in self.igmemory:
self.igmemory.append(name)
def mark_unused(self, var): # Mark an in-game variable as unused
name = int(var[4:])
if var not in self.nreusable and name in self.vars:
self.vars.remove(name)
def mark_not_reusable(self, var): # Mark an in-game variable as not reusable
if var not in self.nreusable:
self.nreusable.append(var)
def mark_reusable(self, var): # Mark an in-game variable as reusable
if var in self.nreusable:
self.nreusable.remove(var)
def get_var(self): # Generate and return a new temporary variable
n = self.get_next_var_id()
name = f"_var{n}"
self.add_var(name)
self.vars.append(n)
return name
def get_next_var_id(self): # Get the id of the next available variable
if len(self.vars):
for i in range(len(self.vars)):
if self.vars[i] != i:
self.vars.insert(i, i)
return i
return len(self.vars)
return 0
def add_func_arg(self, func, var, arg):
self.igfunctionargs[func][0].append(var)
self.igfunctionargs[func][1].append(arg)
def add_warning(self, warning): # Add a warning
if warning not in self.warnings:
self.warnings.append(warning)
def get_tags(self): # Get and load all tags
for tag in tags.tags:
self.memory[tag] = tags.tags[tag]
def add_cmd(self, command, func=None): # Add a command to the current function
if self.prefixes != []:
command = "execute " + ' '.join(self.prefixes) + " run " + command
if self.loop != []:
self.igloops[self.loop[-1]].append(command)
elif func:
self.igfunctions[func].append(command)
else:
self._commands.append(command)
def add_loop(self, tp): # Add a loop
self.loops += 1
self.loop.append(f"_{tp}{self.loops}")
self.igloops[self.loop[-1]] = []
def pop_loop(self): # Pop latest loop
self.loop.pop()
def get_var_name(self, name):
if self.igfunc:
if name in self.igfunctionargs[self.igfunc][1]:
index = self.igfunctionargs[self.igfunc][1].index(name)
return self.igfunctionargs[self.igfunc][0][index]
else:
return name
else:
return name
def add_prefix(self, cmd): # Add prefix (if/else/execute statement)
self.prefixes.append(cmd)
def pop_prefix(self): # Pop latest prefix
self.prefixes.pop()
def set_var(self, name, value): # Set an in-game variable
self.add_cmd(f"scoreboard players set MineScript {name} {value}")
    def igBoolOp(self, operation, left, right):
        """Emit commands computing `left && right` or `left || right`.

        left/right are (value, kind) pairs: kind "rt" is a compile-time
        Python value, "ig" an in-game scoreboard variable.  Returns the
        temp variable holding the 0/1 result.
        NOTE(review): a kind other than "rt"/"ig" leaves `l`/`r` unbound
        and raises UnboundLocalError below — confirm callers guarantee it.
        """
        unused = []
        # Materialize compile-time operands into temp scoreboard variables.
        if left[1] == "rt":
            l = self.get_var()
            unused.append(l)
            self.set_var(l, int(left[0]))
        elif left[1] == "ig": l = left[0]
        if right[1] == "rt":
            r = self.get_var()
            unused.append(r)
            self.set_var(r, int(right[0]))
        elif right[1] == "ig": r = right[0]
        result = self.get_var()
        self.set_var(result, 0)
        if operation == "&&":
            # AND: result becomes 1 only when both scores are >= 1.
            self.add_cmd(f"execute if score MineScript {l} matches 1.. if score MineScript {r} matches 1.. run scoreboard players set MineScript {result} 1")
        elif operation == "||":
            # OR: either score >= 1 independently sets the result.
            self.add_cmd(f"execute if score MineScript {l} matches 1.. run scoreboard players set MineScript {result} 1")
            self.add_cmd(f"execute if score MineScript {r} matches 1.. run scoreboard players set MineScript {result} 1")
        # Temp operands can be recycled now.
        for var in unused:
            self.mark_unused(var)
        return result
    def igComparison(self, comparison, left, right): # genexpr (< > <= >= == !=) genexpr
        """Emit commands comparing two operands; returns a temp holding 0/1.

        Operands are (value, kind) pairs as in igBoolOp.
        """
        unused = []
        if left[1] == "rt":
            l = self.get_var()
            unused.append(l)
            self.set_var(l, int(left[0]))
        elif left[1] == "ig": l = left[0]
        if right[1] == "rt":
            r = self.get_var()
            unused.append(r)
            self.set_var(r, int(right[0]))
        elif right[1] == "ig": r = right[0]
        result = self.get_var()
        # The scoreboard comparison operator spells equality "=".
        if comparison == "==": comparison = "="
        if comparison == "!=":
            # No native "not equal": invert an equality test with `unless`.
            self.set_var(result, 0)
            self.add_cmd(f"execute unless score MineScript {l} = MineScript {r} run scoreboard players set MineScript {result} 1")
        else:
            self.set_var(result, 0)
            self.add_cmd(f"execute if score MineScript {l} {comparison} MineScript {r} run scoreboard players set MineScript {result} 1")
        for var in unused:
            self.mark_unused(var)
        return result
    def igOperation(self, operation, left, right, result=None): # genexpr (+-/*%) genexpr
        """Emit scoreboard arithmetic for `left <operation> right`.

        operation is one of + - * / % ^.  Operands are (value, kind) pairs
        ("rt" = compile-time value, "ig" = in-game variable).  `result` may
        name an existing variable to write into; otherwise a temp is
        allocated.  Returns the result variable name.
        NOTE(review): for "^" with two "rt" operands a result var is
        allocated but nothing is emitted — presumably that case is folded
        earlier; confirm.
        """
        unused = []
        if operation == "^":
            if result is None: result = self.get_var()
            if right[1] == "rt" and left[1] == "ig":
                # Constant exponent: unroll into repeated multiplications.
                if right[0] == 0: self.set_var(result, 1)
                else:
                    self.add_cmd(f"scoreboard players operation MineScript {result} = MineScript {left[0]}")
                    for _ in range(right[0]-1):
                        self.add_cmd(f"scoreboard players operation MineScript {result} *= MineScript {left[0]}")
            elif right[1] == "ig":
                # Run-time exponent: emit a recursive "_powN" function that
                # multiplies |exponent|-1 times.
                r = right[0]
                if left[1] == "rt":
                    l = self.get_var()
                    unused.append(l)
                    self.add_cmd(f"scoreboard players set MineScript {l} {int(left[0])}")
                elif left[1] == "ig": l = left[0]
                self.add_cmd(f"scoreboard players operation MineScript {result} = MineScript {l}")
                count = self.get_var()
                negative = self.get_var()
                unused.append(count)
                unused.append(negative)
                self.set_var(negative, -1)
                # count = abs(exponent) - 1 iterations of "result *= base".
                self.add_cmd(f"scoreboard players operation MineScript {count} = MineScript {r}")
                self.add_cmd(f"execute if score MineScript {count} matches ..-1 run scoreboard players operation MineScript {count} *= MineScript {negative}")
                self.add_cmd(f"scoreboard players remove MineScript {count} 1")
                loop = "_pow" + str(self.loops+1)
                self.add_cmd(f"execute if score MineScript {count} matches 1.. run function {self.datapack_name}:{loop}")
                self.add_loop("pow")
                self.add_cmd(f"scoreboard players operation MineScript {result} *= MineScript {l}")
                self.add_cmd(f"scoreboard players remove MineScript {count} 1")
                self.add_cmd(f"execute if score MineScript {count} matches 1.. run function {self.datapack_name}:{loop}")
                self.pop_loop()
                # x ** 0 == 1 regardless of base.
                self.add_cmd(f"execute if score MineScript {r} matches 0 run scoreboard players set MineScript {result} 1")
                # Negative exponent: result = 1 / result (integer division).
                unit = self.get_var()
                unused.append(unit)
                self.set_var(unit, 1)
                self.add_cmd(f"execute if score MineScript {r} matches ..-1 run scoreboard players operation MineScript {unit} /= MineScript {result}")
                self.add_cmd(f"execute if score MineScript {r} matches ..-1 run scoreboard players operation MineScript {result} = MineScript {unit}")
        else:
            # + - * / %: materialize "rt" operands, then a copy + compound
            # scoreboard operation.
            if left[1] == "rt":
                l = self.get_var()
                unused.append(l)
                self.set_var(l, int(left[0]))
            elif left[1] == "ig": l = left[0]
            if right[1] == "rt":
                r = self.get_var()
                unused.append(r)
                self.set_var(r, int(right[0]))
            elif right[1] == "ig": r = right[0]
            if result is None: result = self.get_var()
            self.add_cmd(f"scoreboard players operation MineScript {result} = MineScript {l}")
            self.add_cmd(f"scoreboard players operation MineScript {result} {operation}= MineScript {r}")
        for var in unused:
            self.mark_unused(var)
        return result
def visitIgAssign(self, ctx): # Expression of type $var = expression
name = ctx.ID().getText()
value = self.visitChildren(ctx)
if type(value) == int or type(value) == float:
self.add_var(name)
self.add_cmd(f"scoreboard players | |
"""
Defines various utilities for BDF parsing including:
- to_fields
"""
from __future__ import annotations
import os
from io import StringIO
from collections import defaultdict
from typing import List, Dict, Tuple, Optional, Any, TYPE_CHECKING
#import pyNastran
from pyNastran.bdf.errors import CardParseSyntaxError
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
# Comment banner lines (written by the BDF writer to group cards) that the
# reader strips out again on input.
_REMOVED_LINES = [
    '$EXECUTIVE CONTROL DECK',
    '$CASE CONTROL DECK',
    '$NODES', '$SPOINTS', '$ELEMENTS',
    '$PARAMS', '$PROPERTIES', '$ELEMENTS_WITH_PROPERTIES',
    '$ELEMENTS_WITH_NO_PROPERTIES (PID=0 and unanalyzed properties)',
    '$UNASSOCIATED_PROPERTIES',
    '$MATERIALS', '$THERMAL MATERIALS',
    '$CONSTRAINTS', '$SPCs', '$MPCs', '$RIGID ELEMENTS',
    '$LOADS', '$AERO', '$AERO CONTROL SURFACES',
    '$STATIC AERO', '$FLUTTER', '$GUST',
    '$DYNAMIC', '$OPTIMIZATION',
    '$COORDS', '$THERMAL', '$TABLES', '$RANDOM TABLES',
    '$SETS', '$CONTACT', '$REJECTS', '$REJECT_LINES',
    '$PROPERTIES_MASS', '$MASSES',
]
# '$ pyNastran:' header keys whose value must be a single space-free token
# (validated in _parse_pynastran_header).
EXPECTED_HEADER_KEYS_CHECK = [
    'version', 'encoding', 'nnodes', 'nelements',
    'punch', 'dumplines', 'is_superelements', # booleans
]
# Header keys whose value may contain commas/spaces (not validated).
EXPECTED_HEADER_KEYS_NO_CHECK = ['skip_cards', 'units', 'code-block']
def _to_fields_mntpnt1(card_lines: List[str]) -> List[str]:
assert len(card_lines) == 2, card_lines
line1, line2 = card_lines
label = line1[:24]
unused_comment = line1[24:] # len=56 max
#assert ',' not in label, f'base={label!r}'
assert '\t' not in label, f'base={label!r}'
fields = [
line1[0:8],
line1[8:16], line1[16:24], line1[24:32], line1[32:40], line1[40:48],
line1[48:56], line1[56:64], line1[64:72],
]
#assert ',' not in line2, card_lines
assert '\t' not in line2, card_lines
assert '*' not in line2, card_lines
if ',' in line2:
# drop off the first field of row2
fields += line2.split(',')[1:]
else:
fields += [
line2[8:16], line2[16:24], line2[24:32], line2[32:40], line2[40:48],
line2[48:56], line2[56:64], line2[64:72],
]
return fields
def to_fields(card_lines: List[str], card_name: str) -> List[str]:
    """
    Converts a series of lines in a card into string versions of the field.
    Handles large, small, and CSV formatted cards.
    Parameters
    ----------
    card_lines : List[str]
        the lines of the BDF card object
    card_name : str
        the card_name -> 'GRID'
    Returns
    -------
    fields : List[str]
        the string formatted fields of the card
    .. warning:: this function is used by the reader and isn't intended
                 to be called by a separate process
    .. code-block:: python
        >>> card_lines = ['GRID,1,,1.0,2.0,3.0']
        >>> card_name = 'GRID'
        >>> fields = to_fields(card_lines, card_name)
        >>> fields
        ['GRID', '1', '', '1.0', '2.0', '3.0']
    """
    fields = []  # type: List[str]
    # MONPNT1 embeds a free-form label region, so it has a dedicated parser.
    if card_name in ['MONPNT1']:
        return _to_fields_mntpnt1(card_lines)
    # first line
    line = card_lines[0].rstrip()
    if '=' in line:
        msg = 'card_name=%r\nequal signs are not supported...line=%r' % (card_name, line)
        raise CardParseSyntaxError(msg)
    if '\t' in line:
        line = expand_tabs(line)
    # '*' marks a large-field card: 1 name field + 4 sixteen-char fields.
    if '*' in line: # large field
        if ',' in line: # csv
            new_fields = line.split(',')[:5]
            for unused_i in range(5 - len(new_fields)):
                new_fields.append('')
        else: # standard
            new_fields = [line[0:8], line[8:24], line[24:40], line[40:56],
                          line[56:72]]
        fields += new_fields
        assert len(fields) == 5, fields
    else: # small field
        # Small-field: 9 eight-char fields on the first line.
        if ',' in line: # csv
            new_fields = line.split(',')[:9]
            for unused_i in range(9 - len(new_fields)):
                new_fields.append('')
        else: # standard
            new_fields = [line[0:8], line[8:16], line[16:24], line[24:32],
                          line[32:40], line[40:48], line[48:56], line[56:64],
                          line[64:72]]
        fields += new_fields
        assert len(fields) == 9, fields
    for line in card_lines[1:]: # continuation lines
        # EIGRL legitimately contains '=' in its options, so it is exempt.
        if '=' in line and card_name != 'EIGRL':
            msg = 'card_name=%r\nequal signs are not supported...\nline=%r' % (card_name, line)
            raise CardParseSyntaxError(msg)
        if '\t' in line:
            line = expand_tabs(line)
        if '*' in line: # large field
            if ',' in line: # csv
                new_fields = line.split(',')[1:5]
                for unused_i in range(4 - len(new_fields)):
                    new_fields.append('')
            else: # standard
                new_fields = [line[8:24], line[24:40], line[40:56], line[56:72]]
            assert len(new_fields) == 4, new_fields
        else: # small field
            if ',' in line: # csv
                new_fields = line.split(',')[1:9]
                for unused_i in range(8 - len(new_fields)):
                    new_fields.append('')
            else: # standard
                new_fields = [line[8:16], line[16:24], line[24:32],
                              line[32:40], line[40:48], line[48:56],
                              line[56:64], line[64:72]]
                if len(new_fields) != 8:
                    nfields = len(new_fields)
                    msg = 'nfields=%s new_fields=%s' % (nfields, new_fields)
                    raise RuntimeError(msg)
        fields += new_fields
    return fields
def expand_tabs(line: str) -> str:
    """Expand tabs to spaces; mixing commas and tabs is a card syntax error."""
    expanded = line.expandtabs()
    if ',' not in expanded:
        return expanded
    expanded = expanded.replace('\t', '')
    msg = f'tabs and commas in the same line are not supported...\nline={expanded!r}'
    raise CardParseSyntaxError(msg)
def parse_executive_control_deck(
        executive_control_lines: List[str]) -> Tuple[Optional[int], Optional[str], Optional[int]]:
    """Extracts the solution from the executive control deck.

    Returns (sol, method, sol_iline): the solution (int when numeric, else
    the raw string), the optional method after a comma (``SOL 600,method``)
    and the index of the SOL line.  Raises ValueError if two SOL cards
    appear.
    """
    sol = None
    method = None
    sol_iline = None
    for (i, eline) in enumerate(executive_control_lines):
        uline = eline.strip().upper()  # uppercase line
        # Drop trailing '$' comments and expand tabs before matching.
        uline = uline.split('$')[0].expandtabs()
        if uline[:4] in ['SOL ']:
            if ',' in uline:
                sline = uline.split(',')  # SOL 600,method
                sol_value = sline[0].strip()
                method = sline[1].strip()
            else:
                sol_value = uline
                method = None
            if sol is None:
                sol = sol_value[3:].strip(' \t=')
                if ',' not in sol:
                    try:
                        # SOL 101
                        sol = int(sol)
                    except ValueError:
                        # SOL SESTATIC
                        pass
            else:
                raise ValueError('cannot overwrite solution existing='
                                 f'|SOL {sol}| new={uline!r}')
            sol_iline = i
    return sol, method, sol_iline
def _parse_pynastran_header(line: str) -> Tuple[Optional[str], Optional[str]]:
    """
    Parameters
    ----------
    line : str
        the line to parse (e.g., '$ pyNastran: version=NX')
    Returns
    -------
    key : str / None
        the key for the parameters
        str : valid (e.g., 'version')
        None : invalid
    value : str / None
        the value for the parameters
        str : valid (e.g., 'NX')
        None : invalid
    Search for data of the form:
    .. code-block:: python
        $ pyNastran: version=NX
        $ pyNastran: encoding=latin-1
        $ pyNastran: punch=True
        $ pyNastran: dumplines=True
        $ pyNastran: nnodes=10
        $ pyNastran: nelements=100
        $ pyNastran: skip_cards=PBEAM,CBEAM
        $ pyNastran: units=in,lb,s
        $ pyNastran: skip elements=12345,6,7,8
    If we find:
    .. code-block:: python
        $$ pyNastran: version=NX
    or a line without a valid pyNastran flag, we'll stop reading,
    even if a valid header statement is on the following line.
    """
    # Strip the leading '$'; '$$' or an empty comment ends header parsing.
    lline = line[1:].lower().strip()
    if len(lline) == 0 or lline[0] == '$':
        key = None
        value = None
    elif 'pynastran' in lline:
        base, word = lline.split(':', 1)
        if base.strip() != 'pynastran':
            msg = 'unrecognized pyNastran marker\n'
            msg += 'line=%r' % line
            raise SyntaxError(msg)
        try:
            key, value = word.strip().split('=', 1)
        except ValueError:
            msg = (
                'expected header of the form:\n'
                '$ pyNastran: version=NX\n'
                '$ pyNastran: encoding=latin-1\n'
                '$ pyNastran: punch=True\n'
                '$ pyNastran: dumplines=True\n'
                '$ pyNastran: nnodes=10\n'
                '$ pyNastran: nelements=100\n'
                '$ pyNastran: skip_cards=PBEAM,CBEAM\n'
                '$ pyNastran: units=in,lb,s\n'
                '$ pyNastran: skip elements=12345,6,7,8\n'
            )
            raise SyntaxError(msg)
        key = key.strip()
        value = value.strip()
        # Single-token keys may not contain spaces; list-valued keys and
        # 'skip ...' keys are accepted as-is; anything else is rejected.
        if key in EXPECTED_HEADER_KEYS_CHECK:
            assert ' ' not in value, 'value=%r' % value
        elif key in EXPECTED_HEADER_KEYS_NO_CHECK:
            pass
        elif 'skip ' in key:
            pass
        else:
            msg = '\nunrecognized pyNastran key=%r type(key)=%s\n' % (key, type(key))
            msg += 'line=%r\n' % line
            msg += 'expected_keys = [%s]\n' % ', '.join(
                EXPECTED_HEADER_KEYS_CHECK + EXPECTED_HEADER_KEYS_NO_CHECK)
            msg += 'type(key0) = %s' % type(EXPECTED_HEADER_KEYS_CHECK[0])
            print(msg)
            raise KeyError(msg)
    else:
        key = None
        value = None
    return key, value
#def clean_empty_lines(lines):
## type: (List[str]) -> List[str]
#"""
#Removes leading and trailing empty lines
#don't remove internally blank lines
#"""
#found_lines = False
#if len(lines) < 2:
#return lines
#for i, line in enumerate(lines):
#if not found_lines and line:
#found_lines = True
#n1 = i
#n2 = i + 1
#elif found_lines and line:
#n2 = i + 1
#lines2 = lines[n1:n2]
#return lines2
def print_filename(filename: str, relpath: bool=True) -> str:
    """Shorten *filename* for display.

    StringIO objects render as '<StringIO>'; paths on the current drive
    become relative (when `relpath` is True); paths on another drive are
    returned untouched.
    """
    if isinstance(filename, StringIO):
        return '<StringIO>'
    file_drive = os.path.splitdrive(os.path.abspath(filename))[0]
    cur_drive = os.path.splitdrive(os.curdir)[0]
    if relpath and file_drive == cur_drive:
        return os.path.relpath(filename)
    return filename
def _parse_dynamic_syntax(key: str,
dict_of_vars: Dict[str, Any],
log: Any) -> Dict[str, Any]:
"""
Applies the dynamic syntax for %varName
Parameters
----------
key : str
the uppercased key
Returns
-------
value : int/float/str
the dynamic value defined by dict_of_vars
.. seealso:: :func: `set_dynamic_syntax`
"""
key = key.strip()[1:]
log.debug("dynamic key = %r" % key)
#dict_of_vars = {'P5':0.5,'ONEK':1000.}
if key not in dict_of_vars:
msg = "key=%r not found in keys=%s" % (key, dict_of_vars.keys())
raise KeyError(msg)
return dict_of_vars[key]
def _get_card_name(lines: List[str], active_filename: str) -> Optional[str]:
"""
Returns the name of the card defined by the provided lines
Parameters
----------
lines : list[str]
the lines of the card
Returns
-------
card_name : str
the name of the card
"""
card_name = lines[0][:8].rstrip('\t, ').split(',')[0].split('\t')[0].strip('*\t ')
if len(card_name) == 0:
return None
if ' ' in card_name or len(card_name) == 0:
msg = 'card_name=%r\nline=%r in filename=%r is invalid' \
% (card_name, lines[0], active_filename)
print(msg)
raise CardParseSyntaxError(msg)
return card_name.upper()
def fill_dmigs(model: BDF) -> None:
"""fills the DMIx cards with the | |
##############################################################################
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# File Abstract:
# Contains all the Operator functionality
##############################################################################
from Helpers import Log
from Util import Sleep
from Util import Time
from Util import Utility
from Helpers import ThreadManager
from Helpers import Worker
from Helpers import Collector
from decimal import Decimal
class ConstantCollector(Collector.Collector):
    """
    Lightweight stand-in for a real collector that always reports a fixed
    value: the given DefaultValue, or the ID text itself when no default
    was supplied.

    NOTE(review): the base Collector.__init__ is deliberately not called;
    only the attributes read by Operator.GetCollectors() are initialized.
    """
    def __init__(self, ID, DefaultValue):
        self._MinionID = ID
        self._OverrideID = ID
        # DefaultValue of None means "use the ID text as the value"
        self.IsDefaultValue = DefaultValue is not None
        self._LastSentValue = DefaultValue if DefaultValue is not None else ID
        self._ReadyForConsumptionByAnother = True

    def GetLastElapsedTimePeriod(self):
        # constants are always "fresh": report the current time in ms
        return Time.GetCurrMS()
# Helper class to hold the information of the script or app it will call to
# gather some kind of data
class Operator(Collector.Collector):
    """
    Base class for collectors that derive their value from other
    collectors (their <Input> list) rather than gathering data directly.
    Subclasses override Collect() to combine the resolved input values.
    """
    def __init__(self,objNamespace,ID,InGroup):
        Collector.Collector.__init__(self,objNamespace,ID,InGroup)
        self._InputList = []                 # collector IDs to resolve at collect time
        self._Collectors = None              # cache of resolved collectors (never populated; see GetCollectors)
        self._ConstantCollectorsList = []    # ConstantCollector objects for constant inputs
        self._InvalidInpWarningSent = False  # ensures "unknown input" is only warned once
    def AddInput(self,CollectorID,isConstant,constVal=None):
        # Register one <Input>; constants get a ConstantCollector wrapper.
        if isConstant:
            objConstant = ConstantCollector(CollectorID,constVal)
            self._ConstantCollectorsList.append(objConstant)
            #objCollector = self._NamespaceObject.GetCollector(CollectorID)
        if None == constVal: # constVal != None, then they specified DefaultValue attribute, and we handle it
            self._InputList.append(CollectorID) # differently, because we only want def value to be used until input is valid
        return True
    def GetCollectors(self):
        """
        Resolve the input IDs to collector objects, falling back to the
        registered constants when a real collector does not exist (yet)
        or is not ready for consumption.  Returns [] while any input is
        unresolved.
        """
        incomplete=False
        list = []
        if None == self._Collectors:
            for CollectorID in self._InputList:
                objCollector = self._NamespaceObject.GetCollector(CollectorID) # is it a real collector that has been created yet
                if None == objCollector: # nope, let's see if it has a constant val instead
                    # NOTE(review): the loop variable reuses objCollector, so if the
                    # constant list is non-empty but has no match, objCollector ends
                    # up as the *last* constant and the None check below is skipped
                    # -- confirm this is the intended behavior
                    for objCollector in self._ConstantCollectorsList:
                        if CollectorID == objCollector.GetID():
                            if objCollector.IsDefaultValue:
                                incomplete=True
                            break
                    if None == objCollector:
                        if False == self._InvalidInpWarningSent:
                            Log.getLogger().warn("Operator with invalid/unknown <Input>: " + CollectorID + ". Ignoring until it becomes valid.")
                            self._InvalidInpWarningSent = True
                        list = []
                        break
                if not objCollector._ReadyForConsumptionByAnother:
                    # real collector exists but isn't ready: prefer a constant/default
                    objCollector = None
                    for objCol in self._ConstantCollectorsList:
                        if CollectorID == objCol.GetID():
                            if objCol.IsDefaultValue:
                                incomplete=True
                            objCollector = objCol
                            break
                    if None == objCollector:
                        if False == self._InvalidInpWarningSent:
                            Log.getLogger().warn("Operator with invalid/unknown <Input>: " + CollectorID + ". Ignoring until it becomes valid.")
                            self._InvalidInpWarningSent = True
                        list = []
                        incomplete = True
                        break
                list.append(objCollector)
            # caching is disabled, so resolution happens on every call
            #if False == incomplete:
            ## self._Collectors = list # made it through, have all of them!
        else:
            list = self._Collectors
        return list
    def Collect(self):
        # subclasses must override; reaching this is a programming error
        raise Exception("Patrick forgot to override the Collect() method on an Operator")
        pass
#will sum other collectors
class Operator_Addition(Operator):
    """Operator that sums the numeric values of all of its inputs."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        """Return the sum of every input's last value, as a string."""
        runningSum = 0
        for inputCollector in self.GetCollectors():
            try:
                runningSum += float(inputCollector.GetLastValue())
            except Exception:
                # non-numeric input: explain once, then fall back to the error value
                if self._InvalidInpWarningSent:
                    return self.ErrorValue
                self._InvalidInpWarningSent = True
                return "Operator Addition Collectors are only valid for collectors that collect a numeric value"
        return str(runningSum)
#will average the data from other collectors, or a single collector
class Operator_Average(Operator):
    """
    Operator that averages its inputs.  With several inputs it averages
    across them; with a single input it averages the last ~11 collected
    totals over time (a rolling window kept in self.historyList).
    """
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator.__init__(self,objNamespace,ID,InGroup)
    def Collect(self):
        total = 0
        list = self.GetCollectors()
        if len(list) < 1:
            # inputs not resolved yet: report zero rather than failing
            return str(0)
        for collector in list:
            try:
                val = float(collector.GetLastValue())
                if collector.ReadyForConsumption():
                    # only fully-initialized collectors contribute to the sum
                    total = total + val
            except Exception as Ex:
                if not self._InvalidInpWarningSent:
                    self._InvalidInpWarningSent = True
                    return "Operator Average Collectors are only valid for collectors that collect a numeric value:--> " + str(collector.GetLastValue())
                else:
                    return self.ErrorValue
        if len(list) > 1:
            # multiple inputs: plain mean across the collectors
            # (the total != 0 guard is redundant - 0/len is also 0)
            if total != 0:
                avg = total / len(list)
            else:
                avg = 0
        else: #just average this collector with itself
            if not hasattr(self, "historyList"):
                self.historyList = []  # lazily created rolling window of past totals
            if len(self.historyList) > 10: # keep a bunch of samples for average, could make this configurable I suppose
                del self.historyList[0]
            self.historyList.append(total)
            listTotal = 0
            for val in self.historyList:
                listTotal += val
            if listTotal != 0:
                avg = listTotal / len(self.historyList)
            else:
                avg = 0
        return str(avg)
#will make a list of data from other collectors
class Operator_MakeList(Operator):
    """Operator that joins the values of its inputs into one comma-separated string."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        """Return the last values of all inputs, comma separated (no spaces)."""
        values = [str(collector.GetLastValue()) for collector in self.GetCollectors()]
        return ",".join(values)
# will simply send the value collected from another collector
class Operator_Duplicate(Operator):
    """
    Operator that forwards the last value of exactly one other collector.
    """
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator.__init__(self,objNamespace,ID,InGroup)
        self._WarningSent = False  # log the "too many inputs" warning only once
    #override this for duplicate, as could be off by quite some time
    #if the operator is called significantly later than the dynamic collector
    #or other collector associated with ti
    def GetElapsedTimeSinceLast(self):
        # NOTE(review): _SyncFile, GetTimeMS and _LastCollectionTime come from the
        # Collector base class (not visible in this file) - confirm their semantics
        if None == self._SyncFile:
            dupCol = self.GetCollectors()
            if len(dupCol) > 0:
                # report the duplicated collector's own elapsed period
                elapsedTime = dupCol[0].GetLastElapsedTimePeriod()
            else:
                elapsedTime = 0
        else:
            elapsedTime = self.GetTimeMS() - self._LastCollectionTime
        return elapsedTime
    def Collect(self):
        list = self.GetCollectors()
        if len(list) !=1 :
            # NOTE(review): despite the warning text ("Only sending the 1st one"),
            # more than one input also returns the error string below
            if len(list) > 1 and not self._WarningSent:
                Log.getLogger().warn("Duplicate Operator given more than 1 collector to duplicate. Only sending the 1st one.")
                self._WarningSent = True
            return "Invalid Duplicate Operator"
        #if collector.ReadyForConsumption():
        return list[0].GetLastValue()
        #return None
class Operator_Compare_EQ(Operator):
    """
    Comparison operator.  Inputs are [value1, value2, ifCollector,
    (optional elseCollector)].  When value1 == value2 the 'if'
    collector's value is sent; otherwise the 'else' collector's value is
    sent, or nothing when no 'else' was configured.  Subclasses reuse
    _VerifyInput()/_Perform() and only change the comparison itself.
    """
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator.__init__(self,objNamespace,ID,InGroup)
        self._Value1 = None
        self._Value2 = None
        self._If = None
        self._Else = None
        self._ReallyDoNotSend = False
        self._Verified = False
        # BUGFIX: _WarningSent was read in _VerifyInput() but never initialized,
        # raising AttributeError the first time the input count was wrong
        self._WarningSent = False

    def Value1(self):
        """First comparison operand (stripped string)."""
        return self._Value1.GetLastValue().strip()

    def Value2(self):
        """Second comparison operand (stripped string)."""
        return self._Value2.GetLastValue().strip()

    def _VerifyInput(self,CompareType):
        """
        Resolve and validate the 3 or 4 input collectors.
        Returns True when usable, otherwise an error string to send.
        """
        if self._Verified:
            return True
        collectors = self.GetCollectors()
        if len(collectors) == 0:
            return "Operator -Input- still pending"
        if len(collectors) < 3:
            if not self._WarningSent:
                Log.getLogger().warn(CompareType + " Operator does not have enough input.")
                self._WarningSent = True  # BUGFIX: flag was never set, re-warning forever
            # BUGFIX: quotes were inverted, which returned the literal text
            # 'Invalid " + CompareType + " Configuration' instead of interpolating
            return 'Invalid ' + CompareType + ' Configuration'
        if len(collectors) > 4:
            Log.getLogger().warn(CompareType + " Operator has too many inputs")
            return 'Invalid ' + CompareType + ' Configuration'
        self._Value1 = collectors[0]
        self._Value2 = collectors[1]
        self._If = collectors[2]
        if len(collectors) == 4:
            self._Else = collectors[3]
        self._ReallyDoNotSend = self._DoNotSend
        if None != self._Collectors:
            self._Verified = True
        return True

    def _Perform(self,compareResult):
        """Send the if/else collector value depending on compareResult."""
        if compareResult:
            self._DoNotSend = self._ReallyDoNotSend
            return self._If.GetLastValue() #if
        if None != self._Else:
            self._DoNotSend = self._ReallyDoNotSend
            return self._Else.GetLastValue()
        self._DoNotSend = True  # no else branch configured: suppress sending
        return str(compareResult)

    def Collect(self):
        valid = self._VerifyInput("Compare_EQ")
        if valid != True:
            return valid  # is an error string
        try:  # numeric compare first, fall back to string compare
            return self._Perform(float(self.Value1()) == float(self.Value2()))
        except Exception:
            pass
        return self._Perform(self.Value1() == self.Value2())
class Operator_Compare_NE(Operator_Compare_EQ):
    """Comparison operator triggering when value1 != value2."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator_Compare_EQ.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        status = self._VerifyInput("Compare_NE")
        if status != True:
            return status  # error string from verification
        try:
            # prefer a numeric comparison when both operands parse as floats
            lhs = float(self.Value1())
            rhs = float(self.Value2())
            return self._Perform(lhs != rhs)
        except Exception:
            pass
        # otherwise compare the raw strings
        return self._Perform(self.Value1() != self.Value2())
class Operator_Compare_GT(Operator_Compare_EQ):
    """Comparison operator triggering when value1 > value2."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator_Compare_EQ.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        status = self._VerifyInput("Compare_GT")
        if status != True:
            return status  # error string from verification
        try:
            # prefer a numeric comparison when both operands parse as floats
            lhs = float(self.Value1())
            rhs = float(self.Value2())
            return self._Perform(lhs > rhs)
        except Exception:
            pass
        # otherwise compare the raw strings lexicographically
        return self._Perform(self.Value1() > self.Value2())
class Operator_Compare_GE(Operator_Compare_EQ):
    """Comparison operator triggering when value1 >= value2."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator_Compare_EQ.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        status = self._VerifyInput("Compare_GE")
        if status != True:
            return status  # error string from verification
        try:
            # prefer a numeric comparison when both operands parse as floats
            lhs = float(self.Value1())
            rhs = float(self.Value2())
            return self._Perform(lhs >= rhs)
        except Exception:
            pass
        # otherwise compare the raw strings lexicographically
        return self._Perform(self.Value1() >= self.Value2())
class Operator_Compare_LT(Operator_Compare_EQ):
    """Comparison operator triggering when value1 < value2."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator_Compare_EQ.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        status = self._VerifyInput("Compare_LT")
        if status != True:
            return status  # error string from verification
        try:
            # prefer a numeric comparison when both operands parse as floats
            lhs = float(self.Value1())
            rhs = float(self.Value2())
            return self._Perform(lhs < rhs)
        except Exception:
            pass
        # otherwise compare the raw strings lexicographically
        return self._Perform(self.Value1() < self.Value2())
class Operator_Compare_LE(Operator_Compare_EQ):
    """Comparison operator triggering when value1 <= value2."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator_Compare_EQ.__init__(self,objNamespace,ID,InGroup)

    def Collect(self):
        status = self._VerifyInput("Compare_LE")
        if status != True:
            return status  # error string from verification
        try:
            # prefer a numeric comparison when both operands parse as floats
            lhs = float(self.Value1())
            rhs = float(self.Value2())
            return self._Perform(lhs <= rhs)
        except Exception:
            pass
        # otherwise compare the raw strings lexicographically
        return self._Perform(self.Value1() <= self.Value2())
def is_number(strNumber):
    """Return True when strNumber parses as a float and contains no spaces."""
    # float() tolerates surrounding whitespace, which the comparison operators
    # above do not want, so any embedded space disqualifies the string outright
    if ' ' in strNumber:
        return False
    try:
        float(strNumber)
    except ValueError:
        return False
    return True
class Operator_Greatest(Operator):
    """Operator that returns the largest of its input values (numeric when possible)."""
    def __init__(self,objNamespace,ID,InGroup=False):
        Operator.__init__(self,objNamespace,ID,InGroup)
        # BUGFIX: _WarningSent was read in Collect() but never initialized anywhere
        # in this hierarchy, raising AttributeError on the first bad input count
        self._WarningSent = False

    def Collect(self):
        """
        Return the greatest value among at least 2 inputs.

        Pairs are compared numerically when both sides parse as numbers,
        otherwise lexicographically as strings.
        """
        collectors = self.GetCollectors()  # resolve once instead of per-check
        if len(collectors) < 2:
            if not self._WarningSent:
                Log.getLogger().warn("Greatest Operator must have at least 2 inputs")
                self._WarningSent = True  # warn only once, like the other operators
            return "Greatest Operating has insufficent Inputs"
        greatest = collectors[0].GetLastValue()
        for collector in collectors:
            val = collector.GetLastValue()
            if is_number(greatest) and is_number(val):  # numeric compare
                if float(greatest) < float(val):
                    greatest = val
            elif greatest < val:  # string compare fallback
                greatest = val
        return greatest
class Operator_Least(Operator):
def __init__(self,objNamespace,ID,InGroup=False):
Operator.__init__(self,objNamespace,ID,InGroup)
def Collect(self):
if len(self.GetCollectors()) < 2:
if not self._WarningSent:
Log.getLogger().warn("Least Operator must have at least 2 inputs")
return "Least Operating has insufficent Inputs"
least = self.GetCollectors()[0].GetLastValue()
for collector | |
# Source: pywash/pywash - src/BandB/MissingValues.py
'''
MIT License
Copyright (c) [2018] [<NAME>]
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
'''
import numpy as np
import pandas as pd
from fancyimpute import KNN, MatrixFactorization, IterativeImputer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import Imputer
import os
import sys
from sklearn.tree import DecisionTreeClassifier
class NoStdStreams(object):
    """
    Context manager that silences stdout/stderr for the duration of the
    `with` block, restoring the original streams on exit.  Replacement
    streams may be supplied instead of the default os.devnull sink.
    """
    def __init__(self, stdout=None, stderr=None):
        self.devnull = open(os.devnull, 'w')
        # fall back to the devnull handle when no replacement stream is given
        self._stdout = stdout or self.devnull or sys.stdout
        self._stderr = stderr or self.devnull or sys.stderr

    def __enter__(self):
        # remember the live streams, flush pending output, then swap them out
        self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
        self.old_stdout.flush()
        self.old_stderr.flush()
        sys.stdout, sys.stderr = self._stdout, self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush()
        self._stderr.flush()
        # restore the original streams and release the devnull handle
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
        self.devnull.close()
def identify_missing(df=None, na_values=['n/a', 'na', '--', '?']):
    """Detect missing values.

    Treats common placeholder tokens such as 'n/a', 'na', '--' and '?'
    as missing; the tokens can be customized via na_values.

    Parameters
    ----------
    df : DataFrame
        Raw data formatted in DataFrame.
    na_values : list
        Tokens to treat as missing.

    Returns
    -------
    flag : bool
        True when any missing value is detected, False otherwise.
    """
    # replace every placeholder token with NaN in a single pass
    cleaned = df.replace(na_values, np.nan)
    # any NaN anywhere means the dataset has missing values
    flag = cleaned.isnull().values.any()
    return flag
def identify_missing_mechanism(df=None):
    """Tries to guess the missing mechanism of the dataset.
    Missing mechanism is not really testable. There may be reasons to
    suspect that the dataset belongs to one missing mechanism based on
    the missing correlation between features, but the result is not
    definite. Relevant information are provided to help the user make
    the decision.
    Three missng mechanisms to be guessed:
    MCAR: Missing completely at ramdom
    MAR: Missing at random
    MNAR: Missing not at random (not available here, normally involes field expert)
    Parameters
    ----------
    df : DataFrame
        Raw data formatted in DataFrame.
    """
    # Pearson correlation coefficient between every 2 features
    # print("")
    # print("Missing correlation (Pearson correlation coefficient) between every 2 features")
    # display(df.isnull().corr())
    df2 = df.iloc[:, :-1].copy()  # exclude the target column (assumed last)
    missing_columns = df2.columns[df2.isnull().any(axis=0)] # columns containing missing values
    # replace nan as true, otherwise false for features containing missing values
    df2[df2.columns[df2.isnull().any(axis=0)]] = df2[df2.columns[df2.isnull().any(axis=0)]].isnull()
    df2[missing_columns] = df2[missing_columns].astype(int) # replace true as 1, false as 0
    df_missing_corr = df2.corr()[
        missing_columns] # compute correlations between features containing missing values and other features
    print("Missing correlation between features containing missing values and other features")
    flag_mar = False
    # test if there is some correlation of a value being missed in feature and the value of any other of the features
    for col in df_missing_corr:
        list_high_corr = []
        # features whose correlation with this missingness indicator exceeds 0.6
        list_high_corr = list_high_corr + (df_missing_corr[col].index[df_missing_corr[col] > 0.6].tolist())
        # drop the column's self-correlation (always 1.0)
        # NOTE(review): int(col) assumes integer column labels; remove() will raise
        # ValueError for string-labelled columns - confirm upstream data always
        # uses integer column names
        list_high_corr.remove(int(col))
        # print(list_high_corr)
        if list_high_corr:
            flag_mar = True
    if flag_mar:
        print('Missing mechanism is probably missing at random')
    else:
        print('Missing mechanism is probably missing completely at random')
def missing_preprocess(features, df=None):
    """Drops the redundant information.

    Redundant information is dropped before imputation: fully-empty rows
    are removed, and features with an extreme proportion (>90%) of
    missing data are reported and optionally dropped after asking the
    user.

    Parameters
    ----------
    features : list
        List of feature names (note: names are re-derived from df's
        columns, so this argument is effectively informational).
    df : DataFrame
        Raw data.

    Returns
    -------
    df : DataFrame
        New DataFrame where redundant information may have been deleted.
    features_new : list
        List of feature names after preprocessing.
    """
    n_rows = df.shape[0]
    n_cols = df.shape[1]
    # rows where every single cell is missing
    empty_row_mask = df.isnull().sum(axis=1) == n_cols
    if empty_row_mask.any():
        print(df[empty_row_mask])
        print("Above empty rows are detected and removed \n")
        df = df.dropna(how='all')
    # columns (excluding the target, assumed last) that are >90% missing
    large_missing_cols = [col for col in df.columns[:-1]
                          if df[col].isnull().sum() > 0.9 * n_rows]
    if large_missing_cols:
        print("Feature {} has extreme large proportion of missing data".format(large_missing_cols))
        ans = input('Do you want to delete the above features? [y/n]')
        if ans == 'y':
            df.drop(large_missing_cols, 1, inplace=True)
        print(df.columns)
    features_new = df.columns.values
    return df, features_new
def compute_imputation_score(Xy):
    """Computes score of the imputation by applying simple classifiers.

    The following simple learners are evaluated and their mean 5-fold
    cross-validated accuracies averaged:
    Naive Bayes Learner; Decision Node Learner;
    Linear Discriminant Learner; One Nearest Neighbor Learner.

    Parameters
    ----------
    Xy : array-like
        Complete numpy array of the dataset.  The training array X must
        already be imputed; the target y (last column) is required to
        predict the performance of the imputation method.

    Returns
    -------
    imputation_score : float
        Predicted score of the imputation method.
    """
    X = Xy[:, :-1]
    y = Xy[:, -1].astype('int')  # targets must be integer class labels
    learners = [
        GaussianNB(),
        DecisionTreeClassifier(criterion='entropy', splitter='best', max_depth=1, random_state=0),
        LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto'),
        KNeighborsClassifier(n_neighbors=1),
    ]
    # mean 5-fold accuracy for each simple learner
    scores = [np.mean(cross_val_score(clf, X, y, cv=5, scoring='accuracy', n_jobs=-1))
              for clf in learners]
    return np.mean(scores)
def deal_mcar(df):
    """Deal with missing data with missing completely at random pattern.

    With under 5% of rows affected, plain list deletion is recommended.
    Otherwise mean, mode, knn, matrix-factorization and multiple
    imputation are each scored via compute_imputation_score() and the
    best-scoring method is recommended.

    Parameters
    ----------
    df : DataFrame
        Dataset containing missing values.

    Returns
    -------
    recommend : str
        Name of the recommended imputation strategy.
    """
    # number of instances
    num_instances = df.shape[0]
    # number of rows containing at least one missing value
    num_missing_instances = df.isnull().sum(axis=1).astype(bool).sum()
    missing_percentage = num_missing_instances / num_instances
    print("Missing percentage is {}".format(missing_percentage))
    if missing_percentage < 0.05:
        recommend = 'list deletion'
    else:
        Xy_incomplete = df.values
        # mean
        Xy_filled_mean = Imputer(missing_values=np.nan, strategy='mean').fit_transform(Xy_incomplete)
        score_mean = compute_imputation_score(Xy_filled_mean)
        print("Imputation score of mean is {}".format(score_mean))
        # mode
        Xy_filled_mode = Imputer(missing_values=np.nan, strategy='most_frequent').fit_transform(Xy_incomplete)
        score_mode = compute_imputation_score(Xy_filled_mode)
        print("Imputation score of mode is {}".format(score_mode))
        # knn (fancyimpute prints progress; silence it)
        with NoStdStreams():
            Xy_filled_knn = KNN().fit_transform(Xy_incomplete)
        score_knn = compute_imputation_score(Xy_filled_knn)
        print("Imputation score of knn is {}".format(score_knn))
        # matrix factorization
        with NoStdStreams():
            Xy_filled_mf = MatrixFactorization().fit_transform(Xy_incomplete)
        score_mf = compute_imputation_score(Xy_filled_mf)
        # BUGFIX: this print previously interpolated score_knn, misreporting MF
        print("Imputation score of matrix factorization is {}".format(score_mf))
        # multiple imputation
        with NoStdStreams():
            Xy_filled_ii = IterativeImputer().fit_transform(Xy_incomplete)
        score_ii = compute_imputation_score(Xy_filled_ii)
        print("Imputation score of multiple imputation is {}".format(score_ii))
        score_dict = {'mean': score_mean, 'mode': score_mode, 'knn': score_knn,
                      'matrix factorization': score_mf, 'multiple imputation': score_ii}
        print("Imputation method with the highest socre is {}".format(max(score_dict, key=score_dict.get)))
        recommend = max(score_dict, key=score_dict.get)
    return recommend
def deal_mar(df):
    """Deal with missing data with missing at random pattern.

    Scores knn, matrix-factorization and multiple imputation with
    compute_imputation_score() and recommends the best-scoring one.

    Parameters
    ----------
    df : DataFrame
        Dataset containing missing values.

    Returns
    -------
    recommend : str
        Name of the recommended imputation strategy.
    """
    Xy_incomplete = df.values
    # knn (fancyimpute prints progress; silence it)
    with NoStdStreams():
        Xy_filled_knn = KNN().fit_transform(Xy_incomplete)
    score_knn = compute_imputation_score(Xy_filled_knn)
    print("Imputation score of knn is {}".format(score_knn))
    # matrix factorization
    with NoStdStreams():
        Xy_filled_mf = MatrixFactorization().fit_transform(Xy_incomplete)
    score_mf = compute_imputation_score(Xy_filled_mf)
    # BUGFIX: this print previously interpolated score_knn, misreporting MF
    print("Imputation score of matrix factorization is {}".format(score_mf))
    # multiple imputation
    with NoStdStreams():
        Xy_filled_ii = IterativeImputer().fit_transform(Xy_incomplete)
    score_ii = compute_imputation_score(Xy_filled_ii)
    print("Imputation score of multiple imputation is {}".format(score_ii))
    score_dict = {'knn': score_knn,
                  'matrix factorization': score_mf, 'multiple imputation': score_ii}
    print("Imputation method with the highest socre is {}".format(max(score_dict, key=score_dict.get)))
    recommend = max(score_dict, key=score_dict.get)
    return recommend
def deal_mnar(df):
    """Deal with missing data with a missing-not-at-random pattern.

    MNAR normally requires domain expertise to model properly; multiple
    imputation is recommended unconditionally as the general fallback.
    """
    return 'multiple imputation'
def clean_missing(df, features, setting):
    """Clean missing values in the dataset.

    Parameters
    ----------
    df : DataFrame
        Raw data containing missing values.
    features : List
        List of feature names.
    setting : str
        Assumed missing mechanism: 'mcar', 'mar' or 'mnar'
        (anything else defaults to MAR).

    Returns
    -------
    features_new : List
        List of feature names after cleaning.
    Xy_filled : array-like
        Numpy array where missing values have been cleaned.

    Raises
    ------
    ValueError
        When the recommended approach has no implementation here.
    """
    # BUGFIX: arguments were passed as (df, features), but the signature of
    # missing_preprocess is (features, df) - the swap handed the feature list
    # where the DataFrame was expected
    df_preprocessed, features_new = missing_preprocess(features, df)
    if setting == 'mcar':
        recommend = deal_mcar(df_preprocessed)
    elif setting == 'mar':
        recommend = deal_mar(df_preprocessed)
    elif setting == 'mnar':
        recommend = deal_mnar(df_preprocessed)
    else:
        print("Default MAR")
        recommend = deal_mar(df_preprocessed)
    if recommend == 'mean':
        print("Applying mean imputation ...")
        Xy_filled = Imputer(missing_values=np.nan, strategy='mean').fit_transform(df_preprocessed.values)
        print("Missing values cleaned!")
    elif recommend == 'mode':
        print("Applying mode imputation ...")
        Xy_filled = Imputer(missing_values=np.nan, strategy='most_frequent').fit_transform(df_preprocessed.values)
        print("Missing values cleaned!")
    elif recommend == 'knn':
        print("Applying knn imputation ...")
        with NoStdStreams():
            Xy_filled = KNN().fit_transform(df_preprocessed.values)
        print("Missing values cleaned!")
    elif recommend == 'matrix factorization':
        print("Applying matrix factorization ...")
        with NoStdStreams():
            Xy_filled = MatrixFactorization().fit_transform(df_preprocessed.values)
        print("Missing values cleaned!")
    elif recommend == 'multiple imputation':
        print("Applying multiple imputation ...")
        with NoStdStreams():
            Xy_filled = IterativeImputer().fit_transform(df_preprocessed.values)
        print("Missing values cleaned!")
    else:
        # BUGFIX: previously fell through and crashed with UnboundLocalError on
        # Xy_filled.  NOTE(review): deal_mcar may recommend 'list deletion',
        # which is not implemented here and therefore also lands in this branch.
        print("Error: Approach not available!")
        raise ValueError("imputation approach not available: {!r}".format(recommend))
    return features_new, Xy_filled
def handle_missing(df, setting='mar', na_values=['n/a', 'na', '--', '?']):
| |
#!/usr/bin/env python
# Classifier using HuggingFace Transformer (by Albert)
import importlib
import pandas as pd
import logging
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import torch
import os
import sys
import glob
from tqdm import tqdm
# make local ./src modules importable before the transformers import below
SRC_PATH = './src'
sys.path.insert(0, SRC_PATH)
from transformers import Trainer, TrainingArguments, Pipeline
# disable Weights & Biases reporting and tokenizer fork-parallelism warnings
os.environ["WANDB_DISABLED"] = "true"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# model param => TYPE, Model Class, Tokenizer Class, Tokenizer file, pre trained Model file, Type (pt=pytorch, tf=tensorflow)
model_param_albert_pt = { 'model_name': 'ALBERT', 'model': 'AlbertForSequenceClassification', 'tokenizer' : 'BertTokenizerFast', 'model_file': 'model/base/bert-kor-base', 'tokenizer_file' : 'model/base/bert-kor-base', 'framework': 'pt'}
model_param_bert_pt = { 'model_name': 'BERT', 'model': 'BertForSequenceClassification', 'tokenizer' : 'BertTokenizerFast', 'model_file': 'kykim/bert-kor-base', 'tokenizer_file' : 'kykim/bert-kor-base', 'framework': 'pt'}
model_param_electa_pt = { 'model_name': 'BERT', 'model': 'ElectraForSequenceClassification', 'tokenizer' : 'ElectraTokenizerFast', 'model_file': 'kykim/electra-kor-base', 'tokenizer_file' : 'kykim/electra-kor-base', 'framework': 'pt'}
# default directory of the fine-tuned news opinion-mining model
NEWS_OM_MODEL = "model/news_om"
# log to both the console and logs/service.log (the logs/ directory must exist)
formatter = logging.Formatter(fmt='%(levelname)s: %(name)s: %(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger('CLF_SVR')
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
file_handler = logging.FileHandler('logs/service.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class PytorchDataset(torch.utils.data.Dataset):
    """Minimal torch Dataset wrapping tokenizer encodings plus class labels."""
    def __init__(self, encodings, labels):
        self.encodings = encodings  # dict: field name -> list of per-sample values
        self.labels = labels

    def __getitem__(self, idx):
        # tensorize each encoding field for this one sample
        sample = {key: torch.tensor(values[idx]) for key, values in self.encodings.items()}
        # label wrapped in a list so it becomes a 1-d tensor
        sample["labels"] = torch.tensor([self.labels[idx]])
        return sample

    def __len__(self):
        return len(self.labels)
class HuggingClassifier():
    def __init__(self, modelParam, train_mode=False):
        """
        Configure the classifier from a model-parameter dict.

        Parameters
        ----------
        modelParam : dict
            Expects keys 'model_name', 'model', 'tokenizer', 'model_file',
            'tokenizer_file' and 'framework' ('pt' or 'tf').
        train_mode : bool
            True when the instance will be used for training rather than
            prediction.
        """
        self.model_name = modelParam['model_name']
        # resolve the transformers model/tokenizer classes by name at runtime
        tmp = importlib.import_module('transformers')
        self.tokenizerModule = getattr(tmp, modelParam['tokenizer'])
        self.modelModule = getattr(tmp, modelParam['model'])
        self.tokenizer_file = modelParam['tokenizer_file']
        self.pretrained_model_file = modelParam['model_file']
        assert(modelParam['framework'] == 'pt' or modelParam['framework'] == 'tf')
        self.framework = modelParam['framework']
        self.model = None      # populated by load_prediction_model()/train()
        self.tokenizer = None  # populated by load_prediction_model()/train()
        #self.clf_util = TextUtil()
        self.train_mode = train_mode
        self.labels = None
        self.prediction_model_loaded = False
    def __category_files_to_data_file(self, data_folder, category_level, encoding='utf-8'):
        """
        Build one tab-separated dataset file from a folder of .txt files,
        using each file's directory path as its category label.

        Parameters
        ----------
        data_folder : str
            Root folder scanned recursively for *.txt files.
        category_level : bool
            When true, every directory level produces a label joined with
            '##' (hierarchical categories); otherwise only the deepest
            directory name is used.
        encoding : str
            Text encoding of the input files.

        Returns
        -------
        str : path of the written dataset file (<data_folder>/clf.dat).
        """
        # iterate directory (as category name) and text file => data set
        elems = glob.glob(data_folder + '/**/*.txt', recursive=True)
        #data_set = pd.DataFrame(columns=['category', 'text'])
        data_lst = []
        data_file = data_folder+"/clf.dat"
        for elem in elems:
            logger.info("Input File: " + str(elem))
            head_tail = os.path.split(elem)
            file_path = head_tail[0]
            file_name = head_tail[1]
            # directory path relative to the data folder carries the category names
            tmp = file_path.replace(data_folder + '/', '')
            tmp_l = tmp.split('/')
            sub_count = len(tmp_l)
            categories = []
            if category_level:
                # emit one label per hierarchy depth: a, a##b, a##b##c, ...
                for i in range(1, sub_count+1):
                    if i > 1:
                        category = "##".join(tmp_l[0:i])
                    else:
                        category = tmp_l[0]
                    categories.append(category)
            else:
                category = tmp_l[-1]
                categories.append(category)
            with open(elem, 'r', encoding=encoding) as train_input:
                sentences = train_input.readlines()
            # each non-blank line becomes one (category, text) sample per label
            for category in categories:
                logger.info("Category: " + str(category))
                for sentence in tqdm(sentences, desc=category):
                    sentence = sentence.strip()
                    if sentence != "":
                        data_lst.append({'category': category, 'text': sentence})
        data_set = pd.DataFrame(data_lst)
        # save data set to file
        logger.info("Writing dataset [" + str(len(data_set)) + "] file : " + str(data_file))
        data_set.to_csv(data_file, index=False, header=None, sep='\t')
        return data_file
def __read_data_file(self, data_file, columns = ['category', 'text'], header=None, index_col=None, encoding='utf-8'):
logger.debug("TextUtil::read_data_file() is called...")
df = pd.read_csv(data_file, delimiter='\t', names=columns, header=header, index_col=index_col, encoding=encoding)
logger.debug(df)
return df
def get_clf_info(self):
# get clf info
info = []
info.append({'model_name':self.model_name})
info.append({'model_dir':self.model_dir})
info.append({'tokenizer_file:': self.tokenizer_file})
info.append({'pretrained_dir:': self.pretrained_model_file})
info.append({'deep learning framework': self.framework})
return info
    def load_prediction_model(self, model_dir, num_categories=-1, labels=None):
        """
        Load a fine-tuned model and tokenizer from model_dir for inference.

        Parameters
        ----------
        model_dir : str
            Directory holding the saved model/tokenizer files.
        num_categories : int
            Number of output labels; -1 means "derive from labels".
        labels : list or None
            Ordered label names; when None they are recovered from
            model_dir/tmp.dat via self.clf_util.
        """
        if self.train_mode:
            logger.error("train_mode is not False for prediction model loading")
            return
        if self.prediction_model_loaded:
            logger.error("clf model is already loaded...")
            return
        logger.info(">>> Loading clf model file : " + str(model_dir))
        logger.info(">>> Loading Transformer model...")
        self.labels = labels
        self.model_dir = model_dir
        if num_categories == -1 or labels == None: # not setting
            # NOTE(review): self.clf_util is commented out in __init__, so this
            # branch raises AttributeError - confirm whether the TextUtil helper
            # should be restored or labels must always be supplied by the caller
            self.labels = self.clf_util.get_labels_from_categories(model_dir+'/tmp.dat')
            num_categories = len(self.labels)
        logger.info(">>> Number of categories : " + str(num_categories))
        # local_files_only avoids any hub download; the files must exist on disk
        self.model = self.modelModule.from_pretrained(self.model_dir, num_labels=num_categories, local_files_only=True)
        self.tokenizer = self.tokenizerModule.from_pretrained(self.model_dir, local_files_only=True)
        self.prediction_model_loaded = True
        logger.info(">>> Prediction model is loaded...")
    def predict(self, input_text, cut_off=0.7, max_length=100):
        """
        Classify a single text and return a result dict (also logged).

        Parameters
        ----------
        input_text : str
            Text to classify.
        cut_off : float
            Minimum softmax probability required to accept the predicted
            label; below it, 'pred' is None.
        max_length : int
            Maximum token length passed to the tokenizer.

        Returns
        -------
        dict with keys 'sentence', 'pred' and 'prob', or None when no
        prediction model has been loaded.
        """
        if self.prediction_model_loaded == False:
            logger.error("Prediction Model is not loaded...")
            return None
        input_sentences = []
        #logger.debug(">>> Input: " + str(input_text))
        input_sentences.append(input_text)
        encoding_ = self.tokenizer(input_sentences, truncation=True, padding=True, max_length=max_length)
        result_ = self.model(torch.tensor(encoding_['input_ids']))
        # highest class probability from the softmax over the logits
        proba = float(torch.nn.functional.softmax(result_['logits'], dim=1).max())
        #logger.debug(">>> Proba : " + str(proba))
        if proba >= float(cut_off):
            # map the argmax logit index back to its label name
            pred = self.labels[np.argmax(result_['logits'].tolist())]
        else:
            pred = None
            logger.debug(">>> Cut off value [" + str(cut_off) + "] is bigger than proba[ " + str(proba) + "]")
        #logger.debug(">>> Result: " + str(pred))
        result_ = {'sentence':input_text, 'pred':pred, 'prob':round(proba,3)}
        logger.info(result_)
        return result_
    def prediction(self, input_text, cut_off=0.7, max_length=100):
        """
        Classify a single text and return [pred, prob].

        Near-duplicate of predict() that returns a list instead of a dict
        and skips the info-level result logging.

        Parameters
        ----------
        input_text : str
            Text to classify.
        cut_off : float
            Minimum softmax probability required to accept the predicted
            label; below it, pred is None.
        max_length : int
            Maximum token length passed to the tokenizer.

        Returns
        -------
        [pred, prob] list, or None when no prediction model is loaded.
        """
        if self.prediction_model_loaded == False:
            logger.error("Prediction Model is not loaded...")
            return None
        input_sentences = []
        #logger.debug(">>> Input: " + str(input_text))
        input_sentences.append(input_text)
        encoding_ = self.tokenizer(input_sentences, truncation=True, padding=True, max_length=max_length)
        result_ = self.model(torch.tensor(encoding_['input_ids']))
        # highest class probability from the softmax over the logits
        proba = float(torch.nn.functional.softmax(result_['logits'], dim=1).max())
        #logger.debug(">>> Proba : " + str(proba))
        if proba >= float(cut_off):
            # map the argmax logit index back to its label name
            pred = self.labels[np.argmax(result_['logits'].tolist())]
        else:
            pred = None
            logger.debug(">>> Cut off value [" + str(cut_off) + "] is bigger than proba[ " + str(proba) + "]")
        #logger.debug(">>> Result: " + str(pred))
        # result_ = {'sentence':input_text, 'pred':pred, 'prob':round(proba,3)}
        result_ = [pred, round(proba,3)]
        # logger.info(result_)
        return result_
def compute_metrics(self, pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
# calculate accuracy using sklearn's function
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
}
    def train(self, model_dir, data_file, max_seq_length = 100, learning_rate=1e-4 , test_size=0.1, batch_size=3, epochs=1):
        """Fine-tune the transformer classifier on a labelled data file.

        Pipeline: load data -> train/test split -> load pretrained model and
        tokenizer -> encode -> train with HuggingFace Trainer -> save model,
        tokenizer and a copy of the input data -> evaluate on the test split.

        Args:
            model_dir: output directory for the trained model and tokenizer.
            data_file: labelled data file with 'text' and 'category' columns.
            max_seq_length: maximum token length for the tokenizer.
            learning_rate: NOTE(review) — logged but never passed to
                TrainingArguments, so the Trainer's default LR is used; confirm.
            test_size: fraction of the data held out for evaluation.
            batch_size: per-device batch size for training and evaluation.
            epochs: number of training epochs.
        """
        train_start = datetime.datetime.now()
        logger.info(">> Training Start ....")
        logger.info("[Step 1] Transformer Classifier Check configuration...")
        logger.info(">>> Data File: " + str(data_file))
        logger.info(">>> Model Name: " + str(self.model_name))
        logger.info(">>> Model Dir: " + str(model_dir))
        logger.info(">>> Max Seq Length: " + str(max_seq_length))
        logger.info(">>> Learning Rate: " + str(learning_rate))
        logger.info(">>> Batch Size: " + str(batch_size))
        logger.info(">>> Epoch : " + str(epochs))
        logger.info(">>> Test Size : " + str(test_size))
        # NOTE(review): model_name is overwritten with the output directory
        # here — confirm this is intended (it is later used as the run name
        # in the evaluation report).
        self.model_name = model_dir
        # dataset read
        self.test_size = test_size
        logger.info("[Step 2] Loading Learning Data...")
        df = self.__read_data_file(data_file)
        num_categories = len(df['category'].unique())
        logger.info(">>> Number of category : " + str(num_categories))
        logger.info(">>> Train, Test Data split...")
        # data set to train / test data set
        # One-hot encode the category column; fixed random_state keeps the
        # split reproducible across runs.
        Y = pd.get_dummies(df['category'], dtype=np.int8) # Category Vector
        x_train, x_test, y_train, y_test = train_test_split(df['text'], Y, test_size=self.test_size, random_state=42)
        # load the model and tokenizer
        logger.info("[Step 3] Loading Transformer model...")
        self.model = self.modelModule.from_pretrained(self.pretrained_model_file, num_labels=num_categories)
        self.tokenizer = self.tokenizerModule.from_pretrained(self.tokenizer_file)
        logger.info("[Step 4] Encoding Text...")
        train_encodings = self.tokenizer(x_train.values.tolist(), truncation=True, padding=True, max_length=max_seq_length)
        test_encodings = self.tokenizer(x_test.values.tolist(), truncation=True, padding=True, max_length=max_seq_length)
        # convert our tokenized data into a torch Dataset
        logger.info("[Step 5] Parameter preparation...")
        # Collapse one-hot rows back to integer class ids for the Dataset.
        y_train_list = np.argmax(y_train.values.tolist(), axis=1)
        y_test_list = np.argmax(y_test.values.tolist(), axis=1)
        train_dataset = PytorchDataset(train_encodings, y_train_list)
        test_dataset = PytorchDataset(test_encodings, y_test_list)
        training_args = TrainingArguments(
            output_dir='tmp',
            overwrite_output_dir = True,
            num_train_epochs=epochs,              # total number of training epochs
            per_device_train_batch_size=batch_size,  # batch size per device during training
            per_device_eval_batch_size=batch_size,   # batch size for evaluation
            warmup_steps=500,                # number of warmup steps for learning rate scheduler
            weight_decay=0.01,               # strength of weight decay
            logging_dir='./logs',            # directory for storing logs
            load_best_model_at_end=True,     # load the best model when finished training (default metric is loss)
            # but you can specify `metric_for_best_model` argument to change to accuracy or other metric
            logging_steps=500,               # log & save weights each logging_steps
            evaluation_strategy="steps",     # evaluate each `logging_steps`
        )
        logger.info("[Step 6] Learning.... ")
        trainer = Trainer(
            model=self.model,                         # the instantiated Transformers model to be trained
            args=training_args,                  # training arguments, defined above
            train_dataset=train_dataset,         # training dataset
            eval_dataset=test_dataset,          # evaluation dataset
            compute_metrics=self.compute_metrics,     # the callback that computes metrics of interest
        )
        # train the model
        trainer.train()
        logger.info("[Step 7] Saving trained model : " + str(model_dir))
        trainer.save_model(model_dir)  # classification model
        self.tokenizer.save_pretrained(model_dir)  # save tokenizer
        # Archive the training data alongside the model for reproducibility.
        self.clf_util.copy_input_data_to_model_folder(data_file, model_dir)
        # evaluate the current model after training
        logger.info("[Step 8] Evaluation...")
        # trainer.predict(...)[0] is the raw logits; argmax gives class ids.
        y_pred = np.argmax(trainer.predict(test_dataset)[0], axis=1)
        y_labels = y_test.columns
        self.clf_util.evaluation(self.model_name, x_test, y_test.values.argmax(axis=1), y_pred, y_labels, model_dir + '/evaluation.txt')
        train_end = datetime.datetime.now()
        logger.critical("Elapsed time: " + str(train_end - train_start))
        logger.info(">>> Training End...")
def train_folder(self, model_dir, data_dir, kor_postagging=False, category_level=False,
balance_samples = True, max_tokens=-1, min_token_len=-1, encoding='utf-8',
max_seq_length = 100, test_size=0.1, learning_rate=1e-4,
use_cached_data_file=True, batch_size=2, epochs=1):
logger.info("[PREP] Categorized training data ==> Merged training data")
if self.train_mode == False:
print("[Error] Plese set train_mode to True !!!")
return
data_file = self.__category_files_to_data_file(data_dir, category_level=category_level, encoding=encoding)
self.train(model_dir, data_file = data_file, max_seq_length=max_seq_length, learning_rate=learning_rate, batch_size=batch_size, test_size=test_size, epochs=epochs)
if __name__ == '__main__':
def training():
data_folder = "raw_data/news_om" #"raw_data/naver_movie_om" #om_test" #"raw_data/news/tmp"
model_dir = "model/news_om_clf2" #'model/kor_bert_news_clf'
clf = HuggingClassifier(modelParam = model_param_electa_pt, train_mode=True) # BERT
clf.train_folder(model_dir=model_dir, data_dir=data_folder, batch_size=48, epochs=1)
def prediction_test():
#set_debug_log()
test_sentences=[
"퇴사 1년 만에 '특허 괴물' 돌변…前 임원 공격에 삼성 '발칵'",
"삼성전자 TV, CES 2022서 최고 제품상 석권",
"'삼성전자, 반도체 수요 증가 기대'-KB증권",
"삼성·LG, 사상 최대 매출…인텔·월풀 따라 잡았다",
"[김대호 박사의 오늘 기업·사람] 씨티그룹·삼성전자·카카오·CATL",
"삼성전자 신제품 발표로 기대감 주가 상승",
"거리두기에 1분기 소매경기 '주춤'…부정전망 더 많아",
"광주지역 소매·유통업 2022년 1분기 경기 '호전' 전망",
"1분기 소매유통업 경기전망 싸늘…온라인·백화점만 ‘방긋’",
"회복세 띠던 소매 경기···올 1분기엔 ‘냉랭’",
"소매경기, 1분기 다시 위축된다…온라인·백화점은 기대감 '솔솔'",
"‘주식 먹튀’ 논란에…류영준 카카오 대표이사 후보자 자진사퇴",
"류영준 사퇴에도…카카오 10만원 | |
import unittest
import numpy
from typing import Dict, Any, List
from qupulse._program.instructions import InstructionBlock, InstructionPointer,\
Trigger, CJMPInstruction, REPJInstruction, GOTOInstruction, EXECInstruction, STOPInstruction,\
InstructionSequence, AbstractInstructionBlock, ImmutableInstructionBlock, Instruction, CHANInstruction
from tests.pulses.sequencing_dummies import DummyWaveform, DummyInstructionBlock
class InstructionPointerTest(unittest.TestCase):
    """Tests for InstructionPointer construction and equality semantics."""

    def test_invalid_offset(self) -> None:
        """Negative offsets must raise ValueError."""
        block = InstructionBlock()
        for bad_offset in (-1, -12):
            with self.assertRaises(ValueError):
                InstructionPointer(block, bad_offset)

    def test_initialization_main_block(self) -> None:
        """Block identity and offset are stored as given."""
        block = InstructionBlock()
        for offset in (0, 1, 924):
            pointer = InstructionPointer(block, offset)
            self.assertIs(block, pointer.block)
            self.assertEqual(offset, pointer.offset)

    def test_initialization_relative_block(self) -> None:
        """Same storage behaviour for a non-main block."""
        block = InstructionBlock()
        for offset in (0, 1, 924):
            pointer = InstructionPointer(block, offset)
            self.assertIs(block, pointer.block)
            self.assertEqual(offset, pointer.offset)

    def test_equality(self) -> None:
        """Pointers differing in block or offset are unequal and hash-distinct."""
        blocks = [InstructionBlock(), InstructionBlock(), InstructionBlock()]
        seen = []
        for block in blocks:
            for offset in (0, 1, 2352):
                pointer = InstructionPointer(block, offset)
                self.assertEqual(pointer, pointer)
                for earlier in seen:
                    self.assertNotEqual(pointer, earlier)
                    self.assertNotEqual(earlier, pointer)
                    self.assertNotEqual(hash(pointer), hash(earlier))
                seen.append(pointer)
class TriggerTest(unittest.TestCase):
    """Each Trigger instance is only equal (and hash-equal) to itself."""

    def test_equality(self) -> None:
        first, second = Trigger(), Trigger()
        self.assertEqual(first, first)
        self.assertNotEqual(first, second)
        self.assertNotEqual(second, first)
        self.assertNotEqual(hash(first), hash(second))
class CJMPInstructionTest(unittest.TestCase):
    """Tests for the conditional-jump instruction."""

    def test_initialization(self) -> None:
        """Trigger and target pointer are stored as given."""
        block = InstructionBlock()
        trigger = Trigger()
        for offset in (0, 1, 23):
            jump = CJMPInstruction(trigger, InstructionPointer(block, offset))
            self.assertEqual(trigger, jump.trigger)
            self.assertEqual(block, jump.target.block)
            self.assertEqual(offset, jump.target.offset)

    def test_equality(self) -> None:
        blocks = [InstructionBlock(), InstructionBlock()]
        # Instructions built from identical arguments compare equal.
        for offset in (0, 1, 23):
            first = CJMPInstruction(0, InstructionPointer(blocks[0], offset))
            second = CJMPInstruction(0, InstructionPointer(blocks[0], offset))
            self.assertEqual(first, second)
            self.assertEqual(second, first)
        # Any difference in trigger, block or offset breaks equality and hashing.
        seen = []
        for trigger in (Trigger(), Trigger()):
            for block in blocks:
                for offset in (0, 17):
                    current = CJMPInstruction(trigger, InstructionPointer(block, offset))
                    self.assertEqual(current, current)
                    for earlier in seen:
                        self.assertNotEqual(current, earlier)
                        self.assertNotEqual(earlier, current)
                        self.assertNotEqual(hash(current), hash(earlier))
                    seen.append(current)

    def test_str(self) -> None:
        block = DummyInstructionBlock()
        trigger = Trigger()
        jump = CJMPInstruction(trigger, InstructionPointer(block, 3))
        self.assertEqual("cjmp to {} on {}".format(InstructionPointer(block, 3), trigger), str(jump))
class REPJInstructionTest(unittest.TestCase):
    """Tests for the repeat-jump (REPJ) instruction."""

    def test_initialization(self) -> None:
        """Count and target pointer are stored as given."""
        block = InstructionBlock()
        for count in [0, 1, 47]:
            for offset in [0, 1, 23]:
                instr = REPJInstruction(count, InstructionPointer(block, offset))
                self.assertEqual(count, instr.count)
                self.assertEqual(block, instr.target.block)
                self.assertEqual(offset, instr.target.offset)

    def test_negative_count(self) -> None:
        """A negative repetition count must raise ValueError."""
        # Fix: the original passed the InstructionBlock class itself instead of
        # an instance to InstructionPointer.
        with self.assertRaises(ValueError):
            REPJInstruction(-3, InstructionPointer(InstructionBlock()))

    def test_equality(self) -> None:
        """Equal args give equal instructions; differing args break equality and hashing."""
        blocks = [InstructionBlock(), InstructionBlock()]
        for count in [0, 1, 47]:
            for offset in [0, 1, 23]:
                instrA = REPJInstruction(count, InstructionPointer(blocks[0], offset))
                instrB = REPJInstruction(count, InstructionPointer(blocks[0], offset))
                self.assertEqual(instrA, instrB)
                self.assertEqual(instrB, instrA)
        instrs = []
        for count in [0, 1, 43]:
            for block in blocks:
                for offset in [0, 17]:
                    instruction = REPJInstruction(count, InstructionPointer(block, offset))
                    self.assertEqual(instruction, instruction)
                    for other in instrs:
                        self.assertNotEqual(instruction, other)
                        self.assertNotEqual(other, instruction)
                        self.assertNotEqual(hash(instruction), hash(other))
                    instrs.append(instruction)

    def test_str(self) -> None:
        block = DummyInstructionBlock()
        instr = REPJInstruction(7, InstructionPointer(block, 3))
        self.assertEqual("repj {} times to {}".format(7, InstructionPointer(block, 3)), str(instr))
class GOTOInstructionTest(unittest.TestCase):
    """Tests for the unconditional-jump instruction."""

    def test_initialization(self) -> None:
        """The target pointer is stored as given."""
        block = InstructionBlock()
        for offset in (0, 1, 23):
            jump = GOTOInstruction(InstructionPointer(block, offset))
            self.assertIs(block, jump.target.block)
            self.assertEqual(offset, jump.target.offset)

    def test_equality(self) -> None:
        blocks = [InstructionBlock(), InstructionBlock()]
        # Identical targets give equal instructions.
        for offset in (0, 1, 23):
            first = GOTOInstruction(InstructionPointer(blocks[0], offset))
            second = GOTOInstruction(InstructionPointer(blocks[0], offset))
            self.assertEqual(first, second)
            self.assertEqual(second, first)
        # Differing block or offset breaks equality and hashing.
        seen = []
        for block in blocks:
            for offset in (0, 17):
                current = GOTOInstruction(InstructionPointer(block, offset))
                self.assertEqual(current, current)
                for earlier in seen:
                    self.assertNotEqual(current, earlier)
                    self.assertNotEqual(earlier, current)
                    self.assertNotEqual(hash(current), hash(earlier))
                seen.append(current)

    def test_str(self) -> None:
        block = DummyInstructionBlock()
        jump = GOTOInstruction(InstructionPointer(block, 3))
        self.assertEqual("goto to {}".format(str(InstructionPointer(block, 3))), str(jump))
class CHANInstructionTest(unittest.TestCase):
    """Tests for the multi-channel dispatch instruction."""

    def test_compare_key(self):
        mapping = dict(a=5)
        instruction = CHANInstruction(mapping)
        # The channel mapping itself serves as the comparison key.
        self.assertIs(instruction.compare_key, mapping)
        self.assertIs(instruction.channel_to_instruction_block, mapping)

    def test_get_item(self):
        mapping = dict(a='b')
        instruction = CHANInstruction(mapping)
        # Subscripting delegates to the underlying mapping.
        self.assertIs(instruction['a'], mapping['a'])

    def test_str(self):
        instruction = CHANInstruction(dict(a='b', c='d'))
        self.assertEqual(str(instruction), 'chan b for a, d for c')
class EXECInstructionTest(unittest.TestCase):
    """Tests for the waveform-execution instruction."""

    def test_initialization(self):
        waveform = DummyWaveform()
        self.assertIs(waveform, EXECInstruction(waveform).waveform)

    def test_equality(self):
        """Equality and hashing follow the wrapped waveform."""
        first_wf, second_wf = DummyWaveform(), DummyWaveform()
        same_a = EXECInstruction(first_wf)
        same_b = EXECInstruction(first_wf)
        other = EXECInstruction(second_wf)
        self.assertEqual(same_a, same_a)
        self.assertEqual(same_a, same_b)
        self.assertEqual(same_b, same_a)
        self.assertNotEqual(same_a, other)
        self.assertNotEqual(other, same_a)
        self.assertEqual(hash(same_a), hash(same_b))
        self.assertNotEqual(hash(same_a), hash(other))

    def test_str(self) -> None:
        waveform = DummyWaveform()
        self.assertEqual("exec {}".format(str(waveform)), str(EXECInstruction(waveform)))
class STOPInstructionTest(unittest.TestCase):
    """Tests for the stateless stop instruction."""

    def test_str(self):
        self.assertEqual('stop', str(STOPInstruction()))

    def test_equality(self):
        # All STOP instructions are interchangeable: equal and hash-equal.
        first, second = STOPInstruction(), STOPInstruction()
        self.assertEqual(first, first)
        self.assertEqual(first, second)
        self.assertEqual(second, first)
        self.assertEqual(hash(first), hash(second))
class AbstractInstructionBlockStub(AbstractInstructionBlock):
    """Minimal concrete AbstractInstructionBlock used to test the base class.

    Stores the given instruction list and return pointer verbatim and uses
    object identity as its comparison key.
    """

    def __init__(self, instructions: List[Instruction], return_ip: InstructionPointer) -> None:
        super().__init__()
        self._stub_instructions = instructions
        self._stub_return_ip = return_ip

    @property
    def instructions(self) -> List[Instruction]:
        return self._stub_instructions

    @property
    def return_ip(self) -> InstructionPointer:
        return self._stub_return_ip

    @property
    def compare_key(self) -> Any:
        # Identity-based: every stub instance compares unequal to all others.
        return id(self)
class AbstractInstructionBlockTest(unittest.TestCase):
    """Exercises the sequence protocol (len, iteration, indexing, slicing) of
    AbstractInstructionBlock via AbstractInstructionBlockStub.

    Invariant under test: a block appends exactly one implicit trailing
    instruction — STOPInstruction when return_ip is None, otherwise a
    GOTOInstruction to return_ip — so len(block) == len(instructions) + 1.
    """

    def test_len_empty(self) -> None:
        # Empty block still reports length 1: the implicit STOP terminator.
        block = AbstractInstructionBlockStub([], None)
        self.assertEqual(1, len(block))
        self.assertEqual(0, len(block.instructions))

    def test_len(self) -> None:
        # One explicit instruction plus the implicit terminator.
        block = AbstractInstructionBlockStub([EXECInstruction(DummyWaveform())], None)
        self.assertEqual(2, len(block))
        self.assertEqual(1, len(block.instructions))

    def test_iterable_empty_no_return(self) -> None:
        # Iterating an empty block without return IP yields a single STOP.
        block = AbstractInstructionBlockStub([], None)
        count = 0
        for instruction in block:
            self.assertEqual(0, count)
            self.assertIsInstance(instruction, STOPInstruction)
            count += 1

    def test_iterable_empty_return(self) -> None:
        # With a return IP, the single implicit instruction is a GOTO to it.
        parent_block = InstructionBlock()
        block = AbstractInstructionBlockStub([], InstructionPointer(parent_block, 13))
        count = 0
        for instruction in block:
            self.assertEqual(0, count)
            self.assertIsInstance(instruction, GOTOInstruction)
            self.assertEqual(InstructionPointer(parent_block, 13), instruction.target)
            count += 1

    def test_iterable_no_return(self) -> None:
        # Explicit instruction first, then the implicit STOP.
        wf = DummyWaveform()
        block = AbstractInstructionBlockStub([EXECInstruction(wf)], None)
        count = 0
        for expected_instruction, instruction in zip([EXECInstruction(wf), STOPInstruction()], block):
            self.assertEqual(expected_instruction, instruction)
            count += 1
        self.assertEqual(2, count)

    def test_iterable_return(self) -> None:
        # Explicit instruction first, then the implicit GOTO to the return IP.
        parent_block = InstructionBlock()
        wf = DummyWaveform()
        block = AbstractInstructionBlockStub([EXECInstruction(wf)], InstructionPointer(parent_block, 11))
        count = 0
        for expected_instruction, instruction in zip([EXECInstruction(wf), GOTOInstruction(InstructionPointer(parent_block, 11))], block):
            self.assertEqual(expected_instruction, instruction)
            count += 1
        self.assertEqual(2, count);

    def test_item_access_empty_no_return(self) -> None:
        # Positive and negative indexing both reach the implicit STOP;
        # anything beyond raises IndexError.
        block = AbstractInstructionBlockStub([], None)
        self.assertEqual(STOPInstruction(), block[0])
        with self.assertRaises(IndexError):
            block[1]
        self.assertEqual(STOPInstruction(), block[-1])
        with self.assertRaises(IndexError):
            block[-2]

    def test_item_access_empty_return(self) -> None:
        # Same as above but the implicit instruction is a GOTO.
        parent_block = InstructionBlock()
        block = AbstractInstructionBlockStub([], InstructionPointer(parent_block, 84))
        self.assertEqual(GOTOInstruction(InstructionPointer(parent_block, 84)), block[0])
        with self.assertRaises(IndexError):
            block[1]
        self.assertEqual(GOTOInstruction(InstructionPointer(parent_block, 84)), block[-1])
        with self.assertRaises(IndexError):
            block[-2]

    def test_item_access_no_return(self) -> None:
        # Index 0 is the explicit EXEC, index 1 the implicit STOP.
        wf = DummyWaveform()
        block = AbstractInstructionBlockStub([EXECInstruction(wf)], None)
        self.assertEqual(EXECInstruction(wf), block[0])
        self.assertEqual(STOPInstruction(), block[1])
        with self.assertRaises(IndexError):
            block[2]
        self.assertEqual(STOPInstruction(), block[-1])
        self.assertEqual(EXECInstruction(wf), block[-2])
        with self.assertRaises(IndexError):
            block[-3]

    def test_item_access_return(self) -> None:
        # Index 0 is the explicit EXEC, index 1 the implicit GOTO.
        wf = DummyWaveform()
        parent_block = InstructionBlock()
        block = AbstractInstructionBlockStub([EXECInstruction(wf)], InstructionPointer(parent_block, 29))
        self.assertEqual(EXECInstruction(wf), block[0])
        self.assertEqual(GOTOInstruction(InstructionPointer(parent_block, 29)), block[1])
        with self.assertRaises(IndexError):
            block[2]
        self.assertEqual(GOTOInstruction(InstructionPointer(parent_block, 29)), block[-1])
        self.assertEqual(EXECInstruction(wf), block[-2])
        with self.assertRaises(IndexError):
            block[-3]

    def test_sliced_item_access(self) -> None:
        # Slices and reverse iteration include the implicit terminator and
        # clamp out-of-range bounds like normal sequences.
        wf = DummyWaveform()
        parent_block = InstructionBlock()
        block = AbstractInstructionBlockStub([EXECInstruction(wf), EXECInstruction(wf)], InstructionPointer(parent_block, 29))
        for instruction in block[:-1]:
            self.assertEqual(EXECInstruction(wf), instruction)
        # NOTE(review): 'expections' is a typo for 'expectations' (local name only).
        expections = [EXECInstruction(wf), EXECInstruction(wf), GOTOInstruction(InstructionPointer(parent_block, 29))]
        for expected, instruction in zip(expections,block[:4]):
            self.assertEqual(expected, instruction)
        for instruction, expected in zip(block[::-1], reversed(expections)):
            self.assertEqual(expected, instruction)
        with self.assertRaises(StopIteration):
            next(iter(block[3:]))
class InstructionBlockTest(unittest.TestCase):
    def __init__(self, method_name: str) -> None:
        """Forward the test method name and disable unittest diff truncation."""
        super().__init__(method_name)
        self.maxDiff = None  # show full diffs on assertion failures
    def __verify_block(self, block: InstructionBlock,
                       expected_instructions: InstructionSequence,
                       expected_compiled_instructions: InstructionSequence,
                       expected_return_ip: InstructionPointer) -> None:
        """Assert a block's raw instructions, compiled (iterated) form and return IP."""
        self.assertEqual(len(expected_instructions), len(block.instructions))
        self.assertEqual(expected_instructions, block.instructions)
        # Iterating a block yields the compiled form incl. the implicit terminator.
        self.assertEqual(expected_compiled_instructions, [x for x in block])
        self.assertEqual(expected_return_ip, block.return_ip)
    def test_empty_unreturning_block(self) -> None:
        # A fresh block with no return IP compiles to just the implicit STOP.
        block = InstructionBlock()
        self.__verify_block(block, [], [STOPInstruction()], None)
def test_empty_returning_block(self) -> None:
return_block = InstructionBlock()
block = InstructionBlock()
ip = InstructionPointer(return_block, 7)
block.return_ip = ip
self.__verify_block(block, [], [GOTOInstruction(ip)], ip)
    def test_create_embedded_block(self) -> None:
        # A child block returning into its parent compiles to a GOTO, while
        # the parent itself is unaffected and still ends with a STOP.
        parent_block = InstructionBlock()
        block = InstructionBlock()
        block.return_ip = InstructionPointer(parent_block, 18)
        self.__verify_block(block, [], [GOTOInstruction(InstructionPointer(parent_block, 18))], InstructionPointer(parent_block, 18))
        self.__verify_block(parent_block, [], [STOPInstruction()], None)
def test_add_instruction_exec(self) -> None:
block = InstructionBlock()
expected_instructions = []
waveforms = [DummyWaveform(), DummyWaveform(), DummyWaveform()]
LOOKUP = [0, 1, 1, 0, 2, 1, 0, 0, 0, 1, 2, 2]
for id in LOOKUP:
waveform = waveforms[id]
instruction = EXECInstruction(waveform)
expected_instructions.append(instruction)
block.add_instruction_exec(waveform)
expected_compiled_instructions = expected_instructions.copy()
expected_compiled_instructions.append(STOPInstruction())
self.__verify_block(block, expected_instructions, expected_compiled_instructions, None)
def test_add_instruction_goto(self) -> None:
block = InstructionBlock()
expected_instructions = []
targets = [InstructionBlock(), InstructionBlock(), InstructionBlock()]
LOOKUP = [0, 1, 1, 0, 2, 1, 0, 0, 0, 1, 2, 2]
for id in LOOKUP:
target = targets[id]
instruction = GOTOInstruction(InstructionPointer(target))
expected_instructions.append(instruction)
block.add_instruction_goto(target)
expected_compiled_instructions = expected_instructions.copy()
expected_compiled_instructions.append(STOPInstruction())
self.__verify_block(block, expected_instructions, expected_compiled_instructions, None)
def test_add_instruction_cjmp(self) -> None:
block = InstructionBlock()
expected_instructions = []
targets = [InstructionBlock(), InstructionBlock(), InstructionBlock()]
triggers = [Trigger(), Trigger()]
LOOKUP = [(0, 0), (1, 0), (1, 1), (0, 1), (2, 0), (1, 0), (0, 1), (0, 1), (0, 0), (1, 0), (2, 1), (2, 1)]
for i in LOOKUP:
block.add_instruction_cjmp(triggers[i[1]], targets[i[0]])
expected_instructions.append(CJMPInstruction(triggers[i[1]], InstructionPointer(targets[i[0]], 0)))
expected_compiled_instructions = expected_instructions.copy()
expected_compiled_instructions.append(STOPInstruction())
self.__verify_block(block, expected_instructions, expected_compiled_instructions, None)
def test_add_instruction_repj(self) -> None:
block = InstructionBlock()
expected_instructions = []
targets = [InstructionBlock(), InstructionBlock(), InstructionBlock()]
counts = [3, 8, 857]
LOOKUP = [(0, 0), (0, 1), (1, 1), (0, 2), (2, 0), (1, 0), (2, 2), (2, 1), (1, 0), (1,2)]
for i in LOOKUP:
block.add_instruction_repj(counts[i[0]], targets[i[1]])
expected_instructions.append(REPJInstruction(counts[i[0]], InstructionPointer(targets[i[1]], 0)))
expected_compiled_instructions = expected_instructions.copy()
expected_compiled_instructions.append(STOPInstruction())
self.__verify_block(block, expected_instructions, expected_compiled_instructions, None)
def test_add_instruction_stop(self) -> None:
block = InstructionBlock()
expected_instructions = [STOPInstruction(), STOPInstruction()]
block.add_instruction_stop()
block.add_instruction_stop()
expected_compiled_instructions = expected_instructions.copy()
expected_compiled_instructions.append(STOPInstruction())
self.__verify_block(block, expected_instructions, expected_compiled_instructions, None)
def test_nested_block_construction(self) -> None:
main_block = InstructionBlock()
expected_instructions = [[], | |
.validators.v1_3_3.jsd_979688084b7ba60d \
import JSONSchemaValidator979688084B7BA60D \
as JSONSchemaValidator979688084B7BA60D_v1_3_3
from .validators.v1_3_3.jsd_98a39bf4485a9871 \
import JSONSchemaValidator98A39Bf4485A9871 \
as JSONSchemaValidator98A39Bf4485A9871_v1_3_3
from .validators.v1_3_3.jsd_99872a134d0a9fb4 \
import JSONSchemaValidator99872A134D0A9Fb4 \
as JSONSchemaValidator99872A134D0A9Fb4_v1_3_3
from .validators.v1_3_3.jsd_9ba14a9e441b8a60 \
import JSONSchemaValidator9Ba14A9E441B8A60 \
as JSONSchemaValidator9Ba14A9E441B8A60_v1_3_3
from .validators.v1_3_3.jsd_9c9a785741cbb41f \
import JSONSchemaValidator9C9A785741CbB41F \
as JSONSchemaValidator9C9A785741CbB41F_v1_3_3
from .validators.v1_3_3.jsd_9cb2cb3f494a824f \
import JSONSchemaValidator9Cb2Cb3F494A824F \
as JSONSchemaValidator9Cb2Cb3F494A824F_v1_3_3
from .validators.v1_3_3.jsd_9e857b5a4a0bbcdb \
import JSONSchemaValidator9E857B5A4A0BBcdb \
as JSONSchemaValidator9E857B5A4A0BBcdb_v1_3_3
from .validators.v1_3_3.jsd_a1a9387346ba92b1 \
import JSONSchemaValidatorA1A9387346Ba92B1 \
as JSONSchemaValidatorA1A9387346Ba92B1_v1_3_3
from .validators.v1_3_3.jsd_a395fae644ca899c \
import JSONSchemaValidatorA395Fae644Ca899C \
as JSONSchemaValidatorA395Fae644Ca899C_v1_3_3
from .validators.v1_3_3.jsd_a39a1a214debb781 \
import JSONSchemaValidatorA39A1A214DebB781 \
as JSONSchemaValidatorA39A1A214DebB781_v1_3_3
from .validators.v1_3_3.jsd_a4967be64dfaaa1a \
import JSONSchemaValidatorA4967Be64DfaAa1A \
as JSONSchemaValidatorA4967Be64DfaAa1A_v1_3_3
from .validators.v1_3_3.jsd_a4a1e8ed41cb9653 \
import JSONSchemaValidatorA4A1E8Ed41Cb9653 \
as JSONSchemaValidatorA4A1E8Ed41Cb9653_v1_3_3
from .validators.v1_3_3.jsd_a4b6c87a4ffb9efa \
import JSONSchemaValidatorA4B6C87A4Ffb9Efa \
as JSONSchemaValidatorA4B6C87A4Ffb9Efa_v1_3_3
from .validators.v1_3_3.jsd_a5ac99774c6bb541 \
import JSONSchemaValidatorA5Ac99774C6BB541 \
as JSONSchemaValidatorA5Ac99774C6BB541_v1_3_3
from .validators.v1_3_3.jsd_a6965b454c9a8663 \
import JSONSchemaValidatorA6965B454C9A8663 \
as JSONSchemaValidatorA6965B454C9A8663_v1_3_3
from .validators.v1_3_3.jsd_a6b798ab4acaa34e \
import JSONSchemaValidatorA6B798Ab4AcaA34E \
as JSONSchemaValidatorA6B798Ab4AcaA34E_v1_3_3
from .validators.v1_3_3.jsd_a7b42836408a8e74 \
import JSONSchemaValidatorA7B42836408A8E74 \
as JSONSchemaValidatorA7B42836408A8E74_v1_3_3
from .validators.v1_3_3.jsd_aba4991d4e9b8747 \
import JSONSchemaValidatorAba4991D4E9B8747 \
as JSONSchemaValidatorAba4991D4E9B8747_v1_3_3
from .validators.v1_3_3.jsd_aeb4dad04a99bbe3 \
import JSONSchemaValidatorAeb4Dad04A99Bbe3 \
as JSONSchemaValidatorAeb4Dad04A99Bbe3_v1_3_3
from .validators.v1_3_3.jsd_aeb9eb67460b92df \
import JSONSchemaValidatorAeb9Eb67460B92Df \
as JSONSchemaValidatorAeb9Eb67460B92Df_v1_3_3
from .validators.v1_3_3.jsd_af8d7b0e470b8ae2 \
import JSONSchemaValidatorAf8D7B0E470B8Ae2 \
as JSONSchemaValidatorAf8D7B0E470B8Ae2_v1_3_3
from .validators.v1_3_3.jsd_b0b7eabc4f4b9b28 \
import JSONSchemaValidatorB0B7Eabc4F4B9B28 \
as JSONSchemaValidatorB0B7Eabc4F4B9B28_v1_3_3
from .validators.v1_3_3.jsd_b199685d4d089a67 \
import JSONSchemaValidatorB199685D4D089A67 \
as JSONSchemaValidatorB199685D4D089A67_v1_3_3
from .validators.v1_3_3.jsd_b2b8cb91459aa58f \
import JSONSchemaValidatorB2B8Cb91459AA58F \
as JSONSchemaValidatorB2B8Cb91459AA58F_v1_3_3
from .validators.v1_3_3.jsd_b3a1c8804c8b9b8b \
import JSONSchemaValidatorB3A1C8804C8B9B8B \
as JSONSchemaValidatorB3A1C8804C8B9B8B_v1_3_3
from .validators.v1_3_3.jsd_b68a6bd8473a9a25 \
import JSONSchemaValidatorB68A6Bd8473A9A25 \
as JSONSchemaValidatorB68A6Bd8473A9A25_v1_3_3
from .validators.v1_3_3.jsd_b78329674878b815 \
import JSONSchemaValidatorB78329674878B815 \
as JSONSchemaValidatorB78329674878B815_v1_3_3
from .validators.v1_3_3.jsd_b7bcaa084e2b90d0 \
import JSONSchemaValidatorB7BcAa084E2B90D0 \
as JSONSchemaValidatorB7BcAa084E2B90D0_v1_3_3
from .validators.v1_3_3.jsd_b888792d43baba46 \
import JSONSchemaValidatorB888792D43BaBa46 \
as JSONSchemaValidatorB888792D43BaBa46_v1_3_3
from .validators.v1_3_3.jsd_b9855ad54ae98156 \
import JSONSchemaValidatorB9855Ad54Ae98156 \
as JSONSchemaValidatorB9855Ad54Ae98156_v1_3_3
from .validators.v1_3_3.jsd_b9b48ac8463a8aba \
import JSONSchemaValidatorB9B48Ac8463A8Aba \
as JSONSchemaValidatorB9B48Ac8463A8Aba_v1_3_3
from .validators.v1_3_3.jsd_ba9dc85b4b8a9a17 \
import JSONSchemaValidatorBa9DC85B4B8A9A17 \
as JSONSchemaValidatorBa9DC85B4B8A9A17_v1_3_3
from .validators.v1_3_3.jsd_bab6c9e5440885cc \
import JSONSchemaValidatorBab6C9E5440885Cc \
as JSONSchemaValidatorBab6C9E5440885Cc_v1_3_3
from .validators.v1_3_3.jsd_bc8aab4746ca883d \
import JSONSchemaValidatorBc8AAb4746Ca883D \
as JSONSchemaValidatorBc8AAb4746Ca883D_v1_3_3
from .validators.v1_3_3.jsd_bca339d844c8a3c0 \
import JSONSchemaValidatorBca339D844C8A3C0 \
as JSONSchemaValidatorBca339D844C8A3C0_v1_3_3
from .validators.v1_3_3.jsd_be892bd84a78865a \
import JSONSchemaValidatorBe892Bd84A78865A \
as JSONSchemaValidatorBe892Bd84A78865A_v1_3_3
from .validators.v1_3_3.jsd_bead7b3443b996a7 \
import JSONSchemaValidatorBead7B3443B996A7 \
as JSONSchemaValidatorBead7B3443B996A7_v1_3_3
from .validators.v1_3_3.jsd_bf859ac64a0ba19c \
import JSONSchemaValidatorBf859Ac64A0BA19C \
as JSONSchemaValidatorBf859Ac64A0BA19C_v1_3_3
from .validators.v1_3_3.jsd_c0bca85643c8b58d \
import JSONSchemaValidatorC0BcA85643C8B58D \
as JSONSchemaValidatorC0BcA85643C8B58D_v1_3_3
from .validators.v1_3_3.jsd_c1a359b14c89b573 \
import JSONSchemaValidatorC1A359B14C89B573 \
as JSONSchemaValidatorC1A359B14C89B573_v1_3_3
from .validators.v1_3_3.jsd_c1ba9a424c08a01b \
import JSONSchemaValidatorC1Ba9A424C08A01B \
as JSONSchemaValidatorC1Ba9A424C08A01B_v1_3_3
from .validators.v1_3_3.jsd_c2a43ad24098baa7 \
import JSONSchemaValidatorC2A43Ad24098Baa7 \
as JSONSchemaValidatorC2A43Ad24098Baa7_v1_3_3
from .validators.v1_3_3.jsd_c2b5fb764d888375 \
import JSONSchemaValidatorC2B5Fb764D888375 \
as JSONSchemaValidatorC2B5Fb764D888375_v1_3_3
from .validators.v1_3_3.jsd_c3b3c9ef4e6b8a09 \
import JSONSchemaValidatorC3B3C9Ef4E6B8A09 \
as JSONSchemaValidatorC3B3C9Ef4E6B8A09_v1_3_3
from .validators.v1_3_3.jsd_c5acd9fa4c1a8abc \
import JSONSchemaValidatorC5AcD9Fa4C1A8Abc \
as JSONSchemaValidatorC5AcD9Fa4C1A8Abc_v1_3_3
from .validators.v1_3_3.jsd_c78c9ad245bb9657 \
import JSONSchemaValidatorC78C9Ad245Bb9657 \
as JSONSchemaValidatorC78C9Ad245Bb9657_v1_3_3
from .validators.v1_3_3.jsd_c7a6592b4b98a369 \
import JSONSchemaValidatorC7A6592B4B98A369 \
as JSONSchemaValidatorC7A6592B4B98A369_v1_3_3
from .validators.v1_3_3.jsd_c8bf6b65414a9bc7 \
import JSONSchemaValidatorC8Bf6B65414A9Bc7 \
as JSONSchemaValidatorC8Bf6B65414A9Bc7_v1_3_3
from .validators.v1_3_3.jsd_c9809b6744f8a502 \
import JSONSchemaValidatorC9809B6744F8A502 \
as JSONSchemaValidatorC9809B6744F8A502_v1_3_3
from .validators.v1_3_3.jsd_ca91da84401abba1 \
import JSONSchemaValidatorCa91Da84401ABba1 \
as JSONSchemaValidatorCa91Da84401ABba1_v1_3_3
from .validators.v1_3_3.jsd_caa3ea704d78b37e \
import JSONSchemaValidatorCaa3Ea704D78B37E \
as JSONSchemaValidatorCaa3Ea704D78B37E_v1_3_3
from .validators.v1_3_3.jsd_cb81b93540baaab0 \
import JSONSchemaValidatorCb81B93540BaAab0 \
as JSONSchemaValidatorCb81B93540BaAab0_v1_3_3
from .validators.v1_3_3.jsd_cb868b2142898159 \
import JSONSchemaValidatorCb868B2142898159 \
as JSONSchemaValidatorCb868B2142898159_v1_3_3
from .validators.v1_3_3.jsd_cba5b8b14edb81f4 \
import JSONSchemaValidatorCba5B8B14Edb81F4 \
as JSONSchemaValidatorCba5B8B14Edb81F4_v1_3_3
from .validators.v1_3_3.jsd_cca519ba45ebb423 \
import JSONSchemaValidatorCca519Ba45EbB423 \
as JSONSchemaValidatorCca519Ba45EbB423_v1_3_3
from .validators.v1_3_3.jsd_cd8469e647caab0e \
import JSONSchemaValidatorCd8469E647CaAb0E \
as JSONSchemaValidatorCd8469E647CaAb0E_v1_3_3
from .validators.v1_3_3.jsd_cd98780f4888a66d \
import JSONSchemaValidatorCd98780F4888A66D \
as JSONSchemaValidatorCd98780F4888A66D_v1_3_3
from .validators.v1_3_3.jsd_cdab9b474899ae06 \
import JSONSchemaValidatorCdab9B474899Ae06 \
as JSONSchemaValidatorCdab9B474899Ae06_v1_3_3
from .validators.v1_3_3.jsd_cf9418234d9ab37e \
import JSONSchemaValidatorCf9418234D9AB37E \
as JSONSchemaValidatorCf9418234D9AB37E_v1_3_3
from .validators.v1_3_3.jsd_cfa049a644bb8a07 \
import JSONSchemaValidatorCfa049A644Bb8A07 \
as JSONSchemaValidatorCfa049A644Bb8A07_v1_3_3
from .validators.v1_3_3.jsd_cfbd3870405aad55 \
import JSONSchemaValidatorCfbd3870405AAd55 \
as JSONSchemaValidatorCfbd3870405AAd55_v1_3_3
from .validators.v1_3_3.jsd_d09b08a3447aa3b9 \
import JSONSchemaValidatorD09B08A3447AA3B9 \
as JSONSchemaValidatorD09B08A3447AA3B9_v1_3_3
from .validators.v1_3_3.jsd_d0a1abfa435b841d \
import JSONSchemaValidatorD0A1Abfa435B841D \
as JSONSchemaValidatorD0A1Abfa435B841D_v1_3_3
from .validators.v1_3_3.jsd_d0a4b88145aabb51 \
import JSONSchemaValidatorD0A4B88145AaBb51 \
as JSONSchemaValidatorD0A4B88145AaBb51_v1_3_3
from .validators.v1_3_3.jsd_d0aafa694f4b9d7b \
import JSONSchemaValidatorD0AaFa694F4B9D7B \
as JSONSchemaValidatorD0AaFa694F4B9D7B_v1_3_3
from .validators.v1_3_3.jsd_d2b4d9d04a4b884c \
import JSONSchemaValidatorD2B4D9D04A4B884C \
as JSONSchemaValidatorD2B4D9D04A4B884C_v1_3_3
from .validators.v1_3_3.jsd_d49af9b84c6aa8ea \
import JSONSchemaValidatorD49AF9B84C6AA8Ea \
as JSONSchemaValidatorD49AF9B84C6AA8Ea_v1_3_3
from .validators.v1_3_3.jsd_d6b8ca774739adf4 \
import JSONSchemaValidatorD6B8Ca774739Adf4 \
as JSONSchemaValidatorD6B8Ca774739Adf4_v1_3_3
from .validators.v1_3_3.jsd_d7a6392845e8969d \
import JSONSchemaValidatorD7A6392845E8969D \
as JSONSchemaValidatorD7A6392845E8969D_v1_3_3
from .validators.v1_3_3.jsd_d888ab6d4d59a8c1 \
import JSONSchemaValidatorD888Ab6D4D59A8C1 \
as JSONSchemaValidatorD888Ab6D4D59A8C1_v1_3_3
from .validators.v1_3_3.jsd_d8a619974a8a8c48 \
import JSONSchemaValidatorD8A619974A8A8C48 \
as JSONSchemaValidatorD8A619974A8A8C48_v1_3_3
from .validators.v1_3_3.jsd_d9a1fa9c4068b23c \
import JSONSchemaValidatorD9A1Fa9C4068B23C \
as JSONSchemaValidatorD9A1Fa9C4068B23C_v1_3_3
from .validators.v1_3_3.jsd_db8e09234a988bab \
import JSONSchemaValidatorDb8E09234A988Bab \
as JSONSchemaValidatorDb8E09234A988Bab_v1_3_3
from .validators.v1_3_3.jsd_dcaa6bde4feb9152 \
import JSONSchemaValidatorDcaa6Bde4Feb9152 \
as JSONSchemaValidatorDcaa6Bde4Feb9152_v1_3_3
from .validators.v1_3_3.jsd_dd85c91042489a3f \
import JSONSchemaValidatorDd85C91042489A3F \
as JSONSchemaValidatorDd85C91042489A3F_v1_3_3
from .validators.v1_3_3.jsd_e0b5599b4f2997b7 \
import JSONSchemaValidatorE0B5599B4F2997B7 \
as JSONSchemaValidatorE0B5599B4F2997B7_v1_3_3
from .validators.v1_3_3.jsd_e2adba7943bab3e9 \
import JSONSchemaValidatorE2AdBa7943BaB3E9 \
as JSONSchemaValidatorE2AdBa7943BaB3E9_v1_3_3
from .validators.v1_3_3.jsd_e39588a5494982c4 \
import JSONSchemaValidatorE39588A5494982C4 \
as JSONSchemaValidatorE39588A5494982C4_v1_3_3
from .validators.v1_3_3.jsd_e487f8d3481b94f2 \
import JSONSchemaValidatorE487F8D3481B94F2 \
as JSONSchemaValidatorE487F8D3481B94F2_v1_3_3
from .validators.v1_3_3.jsd_e6b3db8046c99654 \
import JSONSchemaValidatorE6B3Db8046C99654 \
as JSONSchemaValidatorE6B3Db8046C99654_v1_3_3
from .validators.v1_3_3.jsd_e78bb8a2449b9eed \
import JSONSchemaValidatorE78BB8A2449B9Eed \
as JSONSchemaValidatorE78BB8A2449B9Eed_v1_3_3
from .validators.v1_3_3.jsd_e9b99b2248c88014 \
import JSONSchemaValidatorE9B99B2248C88014 \
as JSONSchemaValidatorE9B99B2248C88014_v1_3_3
from .validators.v1_3_3.jsd_eab7abe048fb99ad \
import JSONSchemaValidatorEab7Abe048Fb99Ad \
as JSONSchemaValidatorEab7Abe048Fb99Ad_v1_3_3
from .validators.v1_3_3.jsd_eb8249e34f69b0f1 \
import JSONSchemaValidatorEb8249E34F69B0F1 \
as JSONSchemaValidatorEb8249E34F69B0F1_v1_3_3
from .validators.v1_3_3.jsd_eba669054e08a60e \
import JSONSchemaValidatorEba669054E08A60E \
as JSONSchemaValidatorEba669054E08A60E_v1_3_3
from .validators.v1_3_3.jsd_ee9aab01487a8896 \
import JSONSchemaValidatorEe9AAb01487A8896 \
as JSONSchemaValidatorEe9AAb01487A8896_v1_3_3
from .validators.v1_3_3.jsd_eeb168eb41988e07 \
import JSONSchemaValidatorEeb168Eb41988E07 \
as JSONSchemaValidatorEeb168Eb41988E07_v1_3_3
from .validators.v1_3_3.jsd_eeb7eb4b4bd8a1dd \
import JSONSchemaValidatorEeb7Eb4B4Bd8A1Dd \
as JSONSchemaValidatorEeb7Eb4B4Bd8A1Dd_v1_3_3
from .validators.v1_3_3.jsd_f083cb13484a8fae \
import JSONSchemaValidatorF083Cb13484A8Fae \
as JSONSchemaValidatorF083Cb13484A8Fae_v1_3_3
from .validators.v1_3_3.jsd_f09319674049a7d4 \
import JSONSchemaValidatorF09319674049A7D4 \
as JSONSchemaValidatorF09319674049A7D4_v1_3_3
from .validators.v1_3_3.jsd_f393abe84989bb48 \
import JSONSchemaValidatorF393Abe84989Bb48 \
as JSONSchemaValidatorF393Abe84989Bb48_v1_3_3
from .validators.v1_3_3.jsd_f3b26b5544cabab9 \
import JSONSchemaValidatorF3B26B5544CaBab9 \
as JSONSchemaValidatorF3B26B5544CaBab9_v1_3_3
from .validators.v1_3_3.jsd_f49548c54be8a3e2 \
import JSONSchemaValidatorF49548C54Be8A3E2 \
as JSONSchemaValidatorF49548C54Be8A3E2_v1_3_3
from .validators.v1_3_3.jsd_f5947a4c439a8bf0 \
import JSONSchemaValidatorF5947A4C439A8Bf0 \
as JSONSchemaValidatorF5947A4C439A8Bf0_v1_3_3
from .validators.v1_3_3.jsd_f5a13ab24c5aaa91 \
import JSONSchemaValidatorF5A13Ab24C5AAa91 \
as JSONSchemaValidatorF5A13Ab24C5AAa91_v1_3_3
from .validators.v1_3_3.jsd_f5a269c44f2a95fa \
import JSONSchemaValidatorF5A269C44F2A95Fa \
as JSONSchemaValidatorF5A269C44F2A95Fa_v1_3_3
from .validators.v1_3_3.jsd_f5ac590c4ca9975a \
import JSONSchemaValidatorF5Ac590C4Ca9975A \
as JSONSchemaValidatorF5Ac590C4Ca9975A_v1_3_3
from .validators.v1_3_3.jsd_f6826a8e41bba242 \
import JSONSchemaValidatorF6826A8E41BbA242 \
as JSONSchemaValidatorF6826A8E41BbA242_v1_3_3
from .validators.v1_3_3.jsd_f6ac994f451ba011 \
import JSONSchemaValidatorF6Ac994F451BA011 \
as JSONSchemaValidatorF6Ac994F451BA011_v1_3_3
from .validators.v1_3_3.jsd_f6b119ad4d4aaf16 \
import JSONSchemaValidatorF6B119Ad4D4AAf16 \
as JSONSchemaValidatorF6B119Ad4D4AAf16_v1_3_3
from .validators.v1_3_3.jsd_f6bd6bf64e6890be \
import JSONSchemaValidatorF6Bd6Bf64E6890Be \
as JSONSchemaValidatorF6Bd6Bf64E6890Be_v1_3_3
from .validators.v1_3_3.jsd_f793192a43dabed9 \
import JSONSchemaValidatorF793192A43DaBed9 \
as JSONSchemaValidatorF793192A43DaBed9_v1_3_3
from .validators.v1_3_3.jsd_f9bd99c74bba8832 \
import JSONSchemaValidatorF9Bd99C74Bba8832 \
as JSONSchemaValidatorF9Bd99C74Bba8832_v1_3_3
from .validators.v1_3_3.jsd_fa9219bf45c8b43b \
import JSONSchemaValidatorFa9219Bf45C8B43B \
as JSONSchemaValidatorFa9219Bf45C8B43B_v1_3_3
from .validators.v1_3_3.jsd_fb9beb664f2aba4c \
import JSONSchemaValidatorFb9BEb664F2ABa4C \
as JSONSchemaValidatorFb9BEb664F2ABa4C_v1_3_3
from .validators.v1_3_3.jsd_fb9bf80f491a9851 \
import JSONSchemaValidatorFb9BF80F491A9851 \
as JSONSchemaValidatorFb9BF80F491A9851_v1_3_3
from .validators.v1_3_3.jsd_fba0d80747eb82e8 \
import JSONSchemaValidatorFba0D80747Eb82E8 \
as JSONSchemaValidatorFba0D80747Eb82E8_v1_3_3
from .validators.v1_3_3.jsd_fbb95b37484a9fce \
import JSONSchemaValidatorFbb95B37484A9Fce \
as JSONSchemaValidatorFbb95B37484A9Fce_v1_3_3
from .validators.v1_3_3.jsd_fc9538fe43d9884d \
import JSONSchemaValidatorFc9538Fe43D9884D \
as JSONSchemaValidatorFc9538Fe43D9884D_v1_3_3
from .validators.v1_3_3.jsd_ff816b8e435897eb \
import JSONSchemaValidatorFf816B8E435897Eb \
as JSONSchemaValidatorFf816B8E435897Eb_v1_3_3
from .validators.v1_3_3.jsd_ffa748cc44e9a437 \
import JSONSchemaValidatorFfa748Cc44E9A437 \
as JSONSchemaValidatorFfa748Cc44E9A437_v1_3_3
from .validators.v2_1_1.jsd_00a2fa6146089317 \
import JSONSchemaValidator00A2Fa6146089317 \
as JSONSchemaValidator00A2Fa6146089317_v2_1_1
from .validators.v2_1_1.jsd_00aec9b1422ab27e \
import JSONSchemaValidator00AeC9B1422AB27E \
as JSONSchemaValidator00AeC9B1422AB27E_v2_1_1
from .validators.v2_1_1.jsd_039de8b147a98690 \
import JSONSchemaValidator039DE8B147A98690 \
as JSONSchemaValidator039DE8B147A98690_v2_1_1
from .validators.v2_1_1.jsd_03b4c8b44919b964 \
import JSONSchemaValidator03B4C8B44919B964 \
as JSONSchemaValidator03B4C8B44919B964_v2_1_1
from .validators.v2_1_1.jsd_069d9823451b892d \
import JSONSchemaValidator069D9823451B892D \
as JSONSchemaValidator069D9823451B892D_v2_1_1
from .validators.v2_1_1.jsd_07874a4c4c9aabd9 \
import JSONSchemaValidator07874A4C4C9AAbd9 \
as JSONSchemaValidator07874A4C4C9AAbd9_v2_1_1
from .validators.v2_1_1.jsd_098cab9141c9a3fe \
import JSONSchemaValidator098CAb9141C9A3Fe \
as JSONSchemaValidator098CAb9141C9A3Fe_v2_1_1
from .validators.v2_1_1.jsd_09b0f9ce4239ae10 \
import JSONSchemaValidator09B0F9Ce4239Ae10 \
as JSONSchemaValidator09B0F9Ce4239Ae10_v2_1_1
from .validators.v2_1_1.jsd_0a9c988445cb91c8 \
import JSONSchemaValidator0A9C988445Cb91C8 \
as JSONSchemaValidator0A9C988445Cb91C8_v2_1_1
from .validators.v2_1_1.jsd_0b836b7b4b6a9fd5 \
import JSONSchemaValidator0B836B7B4B6A9Fd5 \
as JSONSchemaValidator0B836B7B4B6A9Fd5_v2_1_1
from .validators.v2_1_1.jsd_0c8f7a0b49b9aedd \
import JSONSchemaValidator0C8F7A0B49B9Aedd \
as JSONSchemaValidator0C8F7A0B49B9Aedd_v2_1_1
from .validators.v2_1_1.jsd_0db7da744c0b83d8 \
import JSONSchemaValidator0Db7Da744C0B83D8 \
as JSONSchemaValidator0Db7Da744C0B83D8_v2_1_1
from .validators.v2_1_1.jsd_0fa00adf48698287 \
import JSONSchemaValidator0Fa00Adf48698287 \
as JSONSchemaValidator0Fa00Adf48698287_v2_1_1
from .validators.v2_1_1.jsd_109d1b4f4289aecd \
import JSONSchemaValidator109D1B4F4289Aecd \
as JSONSchemaValidator109D1B4F4289Aecd_v2_1_1
from .validators.v2_1_1.jsd_10b06a6a4f7bb3cb \
import JSONSchemaValidator10B06A6A4F7BB3Cb \
as JSONSchemaValidator10B06A6A4F7BB3Cb_v2_1_1
from .validators.v2_1_1.jsd_138518e14069ab5f \
import JSONSchemaValidator138518E14069Ab5F \
as JSONSchemaValidator138518E14069Ab5F_v2_1_1
from .validators.v2_1_1.jsd_1399891c42a8be64 \
import JSONSchemaValidator1399891C42A8Be64 \
as JSONSchemaValidator1399891C42A8Be64_v2_1_1
from .validators.v2_1_1.jsd_149aa93b4ddb80dd \
import JSONSchemaValidator149AA93B4Ddb80Dd \
as JSONSchemaValidator149AA93B4Ddb80Dd_v2_1_1
from .validators.v2_1_1.jsd_149b7ba04e5890b2 \
import JSONSchemaValidator149B7Ba04E5890B2 \
as JSONSchemaValidator149B7Ba04E5890B2_v2_1_1
from .validators.v2_1_1.jsd_15b7aa0c4dda8e85 \
import JSONSchemaValidator15B7Aa0C4Dda8E85 \
as JSONSchemaValidator15B7Aa0C4Dda8E85_v2_1_1
from .validators.v2_1_1.jsd_16a1bb5d48cb873d \
import JSONSchemaValidator16A1Bb5D48Cb873D \
as JSONSchemaValidator16A1Bb5D48Cb873D_v2_1_1
from .validators.v2_1_1.jsd_17929bc7465bb564 \
import JSONSchemaValidator17929Bc7465BB564 \
as JSONSchemaValidator17929Bc7465BB564_v2_1_1
from .validators.v2_1_1.jsd_1c894b5848eab214 \
import JSONSchemaValidator1C894B5848EaB214 \
as JSONSchemaValidator1C894B5848EaB214_v2_1_1
from .validators.v2_1_1.jsd_1da5ebdd434aacfe \
import JSONSchemaValidator1Da5Ebdd434AAcfe \
as JSONSchemaValidator1Da5Ebdd434AAcfe_v2_1_1
from .validators.v2_1_1.jsd_1e962af345b8b59f \
import JSONSchemaValidator1E962Af345B8B59F \
as JSONSchemaValidator1E962Af345B8B59F_v2_1_1
from .validators.v2_1_1.jsd_1eaa8b2148ab81de \
import JSONSchemaValidator1Eaa8B2148Ab81De \
as JSONSchemaValidator1Eaa8B2148Ab81De_v2_1_1
from .validators.v2_1_1.jsd_1eb19887457b9616 \
import JSONSchemaValidator1Eb19887457B9616 \
as JSONSchemaValidator1Eb19887457B9616_v2_1_1
from .validators.v2_1_1.jsd_1eb72ad34e098990 \
import JSONSchemaValidator1Eb72Ad34E098990 \
as JSONSchemaValidator1Eb72Ad34E098990_v2_1_1
from .validators.v2_1_1.jsd_1fb8f9f24c998133 \
import JSONSchemaValidator1Fb8F9F24C998133 \
as JSONSchemaValidator1Fb8F9F24C998133_v2_1_1
from .validators.v2_1_1.jsd_208579ea4ed98f4f \
import JSONSchemaValidator208579Ea4Ed98F4F \
as JSONSchemaValidator208579Ea4Ed98F4F_v2_1_1
from .validators.v2_1_1.jsd_20b19b52464b8972 \
import JSONSchemaValidator20B19B52464B8972 \
as JSONSchemaValidator20B19B52464B8972_v2_1_1
from .validators.v2_1_1.jsd_21a6db2540298f55 \
import JSONSchemaValidator21A6Db2540298F55 \
as JSONSchemaValidator21A6Db2540298F55_v2_1_1
from .validators.v2_1_1.jsd_2499e9ad42e8ae5b \
import JSONSchemaValidator2499E9Ad42E8Ae5B \
as JSONSchemaValidator2499E9Ad42E8Ae5B_v2_1_1
from .validators.v2_1_1.jsd_259eab3045988958 \
import JSONSchemaValidator259EAb3045988958 \
as JSONSchemaValidator259EAb3045988958_v2_1_1
from .validators.v2_1_1.jsd_26b44ab04649a183 \
import JSONSchemaValidator26B44Ab04649A183 \
as JSONSchemaValidator26B44Ab04649A183_v2_1_1
from .validators.v2_1_1.jsd_288df9494f2a9746 \
import JSONSchemaValidator288DF9494F2A9746 \
as JSONSchemaValidator288DF9494F2A9746_v2_1_1
from .validators.v2_1_1.jsd_28b24a744a9994be \
import JSONSchemaValidator28B24A744A9994Be \
as JSONSchemaValidator28B24A744A9994Be_v2_1_1
from .validators.v2_1_1.jsd_2e9db85840fbb1cf \
import JSONSchemaValidator2E9DB85840FbB1Cf \
as JSONSchemaValidator2E9DB85840FbB1Cf_v2_1_1
from .validators.v2_1_1.jsd_2eb1fa1e49caa2b4 \
import JSONSchemaValidator2Eb1Fa1E49CaA2B4 \
as JSONSchemaValidator2Eb1Fa1E49CaA2B4_v2_1_1
from .validators.v2_1_1.jsd_2f97e8fa45f8b2a3 \
import JSONSchemaValidator2F97E8Fa45F8B2A3 \
as JSONSchemaValidator2F97E8Fa45F8B2A3_v2_1_1
from .validators.v2_1_1.jsd_3086c9624f498b85 \
import JSONSchemaValidator3086C9624F498B85 \
as JSONSchemaValidator3086C9624F498B85_v2_1_1
from .validators.v2_1_1.jsd_33b799d04d0a8907 \
import JSONSchemaValidator33B799D04D0A8907 \
as JSONSchemaValidator33B799D04D0A8907_v2_1_1
from .validators.v2_1_1.jsd_33bb2b9d40199e14 \
import JSONSchemaValidator33Bb2B9D40199E14 \
as JSONSchemaValidator33Bb2B9D40199E14_v2_1_1
from .validators.v2_1_1.jsd_349c888443b89a58 \
import JSONSchemaValidator349C888443B89A58 \
as JSONSchemaValidator349C888443B89A58_v2_1_1
from .validators.v2_1_1.jsd_38b7eb13449b9471 \
import JSONSchemaValidator38B7Eb13449B9471 \
as JSONSchemaValidator38B7Eb13449B9471_v2_1_1
from .validators.v2_1_1.jsd_38bd0b884b89a785 \
import JSONSchemaValidator38Bd0B884B89A785 \
as JSONSchemaValidator38Bd0B884B89A785_v2_1_1
from .validators.v2_1_1.jsd_398668874439a41d \
import JSONSchemaValidator398668874439A41D \
as JSONSchemaValidator398668874439A41D_v2_1_1
from .validators.v2_1_1.jsd_3b9ef9674429be4c \
import JSONSchemaValidator3B9EF9674429Be4C \
as JSONSchemaValidator3B9EF9674429Be4C_v2_1_1
from .validators.v2_1_1.jsd_3cb24acb486b89d2 \
import JSONSchemaValidator3Cb24Acb486B89D2 \
as JSONSchemaValidator3Cb24Acb486B89D2_v2_1_1
from .validators.v2_1_1.jsd_3d923b184dc9a4ca \
import JSONSchemaValidator3D923B184Dc9A4Ca \
as JSONSchemaValidator3D923B184Dc9A4Ca_v2_1_1
from .validators.v2_1_1.jsd_3d9b99c343398a27 \
import JSONSchemaValidator3D9B99C343398A27 \
as JSONSchemaValidator3D9B99C343398A27_v2_1_1
from .validators.v2_1_1.jsd_3e94cb1b485b8b0e \
import JSONSchemaValidator3E94Cb1B485B8B0E \
as JSONSchemaValidator3E94Cb1B485B8B0E_v2_1_1
from .validators.v2_1_1.jsd_3ebcda3e4acbafb7 \
import JSONSchemaValidator3EbcDa3E4AcbAfb7 \
as JSONSchemaValidator3EbcDa3E4AcbAfb7_v2_1_1
from .validators.v2_1_1.jsd_3f89bbfc4f6b8b50 \
import JSONSchemaValidator3F89Bbfc4F6B8B50 \
as JSONSchemaValidator3F89Bbfc4F6B8B50_v2_1_1
from .validators.v2_1_1.jsd_3faaa9944b49bc9f \
import JSONSchemaValidator3FaaA9944B49Bc9F \
as JSONSchemaValidator3FaaA9944B49Bc9F_v2_1_1
from .validators.v2_1_1.jsd_429c28154bdaa13d \
import JSONSchemaValidator429C28154BdaA13D \
as JSONSchemaValidator429C28154BdaA13D_v2_1_1
from .validators.v2_1_1.jsd_42b6a86e44b8bdfc \
import JSONSchemaValidator42B6A86E44B8Bdfc \
as JSONSchemaValidator42B6A86E44B8Bdfc_v2_1_1
from .validators.v2_1_1.jsd_44974ba5435a801d \
import JSONSchemaValidator44974Ba5435A801D \
as JSONSchemaValidator44974Ba5435A801D_v2_1_1
from .validators.v2_1_1.jsd_44a39a074a6a82a2 \
import JSONSchemaValidator44A39A074A6A82A2 \
as JSONSchemaValidator44A39A074A6A82A2_v2_1_1
from .validators.v2_1_1.jsd_45bc7a8344a8bc1e \
import JSONSchemaValidator45Bc7A8344A8Bc1E \
as JSONSchemaValidator45Bc7A8344A8Bc1E_v2_1_1
from .validators.v2_1_1.jsd_4695090d403b8eaa \
import JSONSchemaValidator4695090D403B8Eaa \
as JSONSchemaValidator4695090D403B8Eaa_v2_1_1
from .validators.v2_1_1.jsd_47a1b84b4e1b8044 \
import JSONSchemaValidator47A1B84B4E1B8044 \
as JSONSchemaValidator47A1B84B4E1B8044_v2_1_1
from .validators.v2_1_1.jsd_4ababa75489ab24b \
import JSONSchemaValidator4AbaBa75489AB24B \
as JSONSchemaValidator4AbaBa75489AB24B_v2_1_1
from .validators.v2_1_1.jsd_4bb22af046fa8f08 \
import JSONSchemaValidator4Bb22Af046Fa8F08 \
as JSONSchemaValidator4Bb22Af046Fa8F08_v2_1_1
from .validators.v2_1_1.jsd_4c8cab5f435a80f4 \
import JSONSchemaValidator4C8CAb5F435A80F4 \
as JSONSchemaValidator4C8CAb5F435A80F4_v2_1_1
from .validators.v2_1_1.jsd_4ca2db1143ebb5d7 \
import JSONSchemaValidator4Ca2Db1143EbB5D7 \
as JSONSchemaValidator4Ca2Db1143EbB5D7_v2_1_1
from .validators.v2_1_1.jsd_4d86a993469a9da9 \
import JSONSchemaValidator4D86A993469A9Da9 \
as JSONSchemaValidator4D86A993469A9Da9_v2_1_1
from .validators.v2_1_1.jsd_4d9ca8e2431a8a24 \
import JSONSchemaValidator4D9CA8E2431A8A24 \
as JSONSchemaValidator4D9CA8E2431A8A24_v2_1_1
from .validators.v2_1_1.jsd_4da91a544e29842d \
import JSONSchemaValidator4Da91A544E29842D \
as JSONSchemaValidator4Da91A544E29842D_v2_1_1
from .validators.v2_1_1.jsd_4dbe3bc743a891bc \
import JSONSchemaValidator4Dbe3Bc743A891Bc \
as JSONSchemaValidator4Dbe3Bc743A891Bc_v2_1_1
from .validators.v2_1_1.jsd_4eb56a614cc9a2d2 \
import JSONSchemaValidator4Eb56A614Cc9A2D2 \
as JSONSchemaValidator4Eb56A614Cc9A2D2_v2_1_1
from .validators.v2_1_1.jsd_4f947a1c4fc884f6 \
import JSONSchemaValidator4F947A1C4Fc884F6 \
as | |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import shutil
import sys
from functools import partial
import azlmbr.editor
import azlmbr.legacy.general as general
import azlmbr.paths
import azlmbr.asset
sys.path.append(os.path.join(azlmbr.paths.devassets, "Gem", "PythonTests"))
from Automated.atom_utils.automated_test_utils import TestHelper as helper
from Automated.atom_utils.screenshot_utils import ScreenshotHelper
from Automated.atom_utils import hydra_editor_utils as hydra
class ModelReloadHelper():
    """
    Listens for 'OnModelReady' events from the MeshComponentNotificationBus so
    the test can block until a hot-reloaded model has finished processing.
    """
    def __init__(self):
        # Flipped to True by the OnModelReady bus callback.
        self.is_model_ready = False
    def model_is_ready_predicate(self):
        """Predicate suitable for passing to helper.wait_for_condition."""
        return self.is_model_ready
    def on_model_ready(self, parameters):
        # Bus callback: the mesh component reports its model asset is usable.
        self.is_model_ready = True
    def copy_file_and_wait_for_on_model_ready(self, entityId, sourceFile):
        """
        Copy sourceFile over the hot-reload target asset, then wait (up to 20s)
        for the entity's mesh component to signal OnModelReady.
        Returns False when the copy fails or the notification never arrives.
        """
        # Connect to the notification bus BEFORE touching the file so the
        # OnModelReady event cannot slip past us.
        self.on_model_ready_handler = azlmbr.bus.NotificationHandler('MeshComponentNotificationBus')
        self.on_model_ready_handler.connect(entityId)
        self.on_model_ready_handler.add_callback('OnModelReady', self.on_model_ready)
        # Reset AFTER connecting, in case an OnModelReady fired while the
        # callback was being registered.
        self.is_model_ready = False
        if not copy_file(sourceFile, 'Objects/ModelHotReload/hotreload.fbx'):
            # copy_file failed
            return False
        return helper.wait_for_condition(lambda: self.is_model_ready, 20.0)
def run():
"""
Test Case - Material:
1. Opens the "Empty" level
2. Creates a new entity and attaches the Mesh+Material components to it.
3. Sets a camera to point at the entity.
4. Applies a material that will display the vertex color.
5. Applies a mesh that does not exist yet to the Mesh component.
6. Copies a model with a vertex color stream to the location of the asset applied to the mesh component.
7. Verifies the vertex color is consumed by a shader correctly via screenshot comparison.
8. Reloads the model using one without a vertex color stream.
9. Verifies the vertex color is no longer consumed by the shader via screenshot comparison.
10. Reloads the model using one with multiple materials
11. Verifies the correct material slots appear on the material component.
12. Reloads the model using one with different materials and multiple lods.
13. Verifies the correct material slots appear on the material component.
14. Reloads the model using one without lods and with an extra color stream.
15. Verifies the correct material slots appear on the material component.
16. Closes the Editor and the test ends.
:return: None
"""
# Open EmptyLevel.
helper.init_idle()
helper.open_level("EmptyLevel")
# Create a new entity and attach a Mesh+Material component to it.
mesh_offset = azlmbr.math.Vector3(4.5, 3.0, 0.0)
my_entity_id = azlmbr.editor.ToolsApplicationRequestBus(azlmbr.bus.Broadcast, 'CreateNewEntity', azlmbr.entity.EntityId())
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetWorldTranslation", my_entity_id, mesh_offset)
if my_entity_id.IsValid():
general.log("Entity successfully created.")
mesh_component = helper.attach_component_to_entity(my_entity_id, 'Mesh')
material_component = helper.attach_component_to_entity(my_entity_id, 'Material')
# Find the entity with a camera
search_filter = azlmbr.entity.SearchFilter()
search_filter.names = ['Camera']
camera_entity_id = azlmbr.entity.SearchBus(azlmbr.bus.Broadcast, 'SearchEntities', search_filter)[0]
# Make the camera look at the mesh component entity
camera_position = mesh_offset.Add(azlmbr.math.Vector3(-5.0, 0.0, 0.0))
forward_axis = 2 #YPositive
camera_transform = azlmbr.math.Transform_CreateLookAt(camera_position, mesh_offset, forward_axis)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetWorldTM", camera_entity_id, camera_transform)
azlmbr.editor.EditorCameraRequestBus(
azlmbr.bus.Broadcast, "SetViewAndMovementLockFromEntityPerspective", camera_entity_id, False)
# Apply a material that will display the vertex color
display_vertex_color_path = os.path.join("testdata", "objects", "modelhotreload", "displayvertexcolor.azmaterial")
display_vertex_color_asset_id = hydra.get_asset_by_path(display_vertex_color_path)
property_path = 'Default Material|Material Asset'
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'SetComponentProperty', material_component, property_path, display_vertex_color_asset_id)
# Set mesh asset 'testdata/objects/modelhotreload/hotreload.azmodel'
# Note, this mesh does not yet exist. Part of the test is that it reloads once it is added
# Since it doesn't exist in the asset catalog yet, and we have no way to auto-generate the correct sub-id, we must use the hard coded assetId
model_id = azlmbr.asset.AssetId_CreateString("{66ADF6FF-3CA4-51F6-9681-5697D4A29F56}:10241ecb")
mesh_property_path = 'Controller|Configuration|Mesh Asset'
newObj = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'SetComponentProperty', mesh_component, mesh_property_path, model_id)
model_reload_helper = ModelReloadHelper()
# Copy the vertexcolor.fbx file to the location of hotreload.azmodel, and wait for it to be ready
if not model_reload_helper.copy_file_and_wait_for_on_model_ready(my_entity_id, 'Objects/ModelHotReload/vertexcolor.fbx'):
general.log("OnModelReady never happened - vertexcolor.fbx")
# Use a screenshot for validation since the presence of a vertex color stream should change the appearance of the object
screenshot_helper = ScreenshotHelper(general.idle_wait_frames)
screenshot_helper.capture_screenshot_blocking_in_game_mode('screenshot_atom_ModelHotReload_VertexColor.ppm')
# Test that removing a vertex stream functions
if not model_reload_helper.copy_file_and_wait_for_on_model_ready(my_entity_id, 'Objects/ModelHotReload/novertexcolor.fbx'):
general.log("OnModelReady never happened - novertexcolor.fbx")
# Use a screenshot for validation since the absence of a vertex color stream should change the appearance of the object
screenshot_helper.capture_screenshot_blocking_in_game_mode('screenshot_atom_ModelHotReload_NoVertexColor.ppm')
# hot-reload the mesh that multiple materials, plus more/fewer vertices
if not model_reload_helper.copy_file_and_wait_for_on_model_ready(my_entity_id, 'Multi-mat_fbx/multi-mat_mesh-groups_1m_cubes.fbx'):
general.log("OnModelReady never happened - multi-mat_mesh-groups_1m_cubes.fbx")
# Use the presence of multiple material slots in the material component to validate that the model was reloaded
# and to verify the material component was updated
lod_material_list = ["StingrayPBS1", "Red_Xaxis", "Green_Yaxis", "Blue_Zaxis", "With_Texture"]
model_material_override_lod = 18446744073709551615
material_label_dict = {
model_material_override_lod:lod_material_list,
0:lod_material_list,
}
validate_material_slot_labels(my_entity_id, material_label_dict)
# Test that increasing the lod count functions
if not model_reload_helper.copy_file_and_wait_for_on_model_ready(my_entity_id, 'Objects/ModelHotReload/sphere_5lods.fbx'):
general.log("OnModelReady never happened - sphere_5lods.fbx")
# The model material overrides have 5 slots, each individual lod only has 1 slot
material_label_dict = {
model_material_override_lod:["lambert0", "lambert3", "lambert4", "lambert5", "lambert6"],
0:["lambert0"],
1:["lambert3"],
2:["lambert4"],
3:["lambert5"],
4:["lambert6"],
}
validate_material_slot_labels(my_entity_id, material_label_dict)
# Test that adding a vertex stream and removing lods functions
if not model_reload_helper.copy_file_and_wait_for_on_model_ready(my_entity_id, 'Objects/ModelHotReload/vertexcolor.fbx'):
general.log("OnModelReady never happened - vertexcolor.fbx")
# Use the presence of a single material slot in the material component to validate the model reloaded
lod_material_list = ["Material"]
material_label_dict = {
model_material_override_lod:lod_material_list,
0:lod_material_list,
}
validate_material_slot_labels(my_entity_id, material_label_dict)
print_material_slot_labels(my_entity_id)
"""
Future steps for other test cases
apply a material override to two of the materials
apply a property override to one of the materials
Default material not properly being cleared
- apply default material on one model
-- reload to different model (sphere5_lods)
- apply material slot overrides
- clear the default material assignment (this might have been done before reloading the mesh, or without reloading at all, just assigning a new mesh) (it might have been done before or after applying the slot overrides)
- expected: slots without overrides use their default material
- actual: slots without overrides use the old default material
remove one of the materials
change some faces to use the same material as one of the already overriden slots
also change some faces to use one of the materials that has the default applied in the material component
also change some faces to use a newly added material
enable lod material override
Repeat with the actor component
Reload model with cloth component
"""
# Close the Editor to end the test.
helper.close_editor()
def print_material_slot_labels(entityId):
    """Debug helper: log every material slot assignment id and label on the entity."""
    general.log("Printing Material Slot AssignmentIds and Labels")
    assignments = azlmbr.render.MaterialComponentRequestBus(
        azlmbr.bus.Event, 'GetOriginalMaterialAssignments', entityId)
    for assignment_id in assignments:
        general.log(f"  AssignementId (slotId:lod): {assignment_id.ToString()}")
        label = azlmbr.render.MaterialComponentRequestBus(
            azlmbr.bus.Event, 'GetMaterialSlotLabel', entityId, assignment_id)
        general.log(f"  SlotLabel: {label}")
def validate_material_slot_labels(entityId, material_label_dict):
    """
    Validate that the original material assignment map on the entity matches what is expected
    :param entityId: The entity with the material component
    :param material_label_dict: A dict where each key is an lod index and each value a list of expected material slot labels.
    :return: True if all the expected slot/lod combinations are found and no unexpected combinations are found. False otherwise.
    """
    # Track whether each expected material label for each lod has been seen.
    found_labels = dict()
    for lod in material_label_dict:
        found_labels[lod] = dict()
        for label in material_label_dict[lod]:
            found_labels[lod][label] = False
    # Look for lods or slots that were not expected. Mark the expected ones as found
    material_assignment_map = azlmbr.render.MaterialComponentRequestBus(azlmbr.bus.Event, 'GetOriginalMaterialAssignments', entityId)
    for assignment_id in material_assignment_map:
        # Ignore the default assignment, since it exists for every model/lod
        if not assignment_id.IsDefault():
            if assignment_id.lodIndex not in found_labels:
                general.log("There is an unexpected lod in the material map")
                general.log(f"  lod: {assignment_id.lodIndex}")
                return False
            else:
                slot_label = azlmbr.render.MaterialComponentRequestBus(azlmbr.bus.Event, 'GetMaterialSlotLabel', entityId, assignment_id)
                if slot_label not in found_labels[assignment_id.lodIndex]:
                    general.log("There is an unexpected material slot in the lod")
                    general.log(f"  lod: {assignment_id.lodIndex} slot label: {slot_label}")
                    return False
                else:
                    found_labels[assignment_id.lodIndex][slot_label] = True
    # Check to see that all the expected lods and labels were found
    for material_lod in found_labels:
        for label in found_labels[material_lod]:
            if not found_labels[material_lod][label]:
                general.log("There was an expected material slot/lod combination that was not found")
                # BUG FIX: this branch previously logged the stale loop variables
                # (assignment_id / slot_label) from the scan above, which reported the
                # wrong combination and raised NameError when the assignment map was
                # empty. Log the actually-missing lod/label instead.
                general.log(f"  lod: {material_lod} slot label: {label}")
                return False
    # All the expected material slot/lod combinations were found
    return True
# Root of the Atom TestData folder; copy_file() resolves its src/dst paths
# relative to this directory.
# NOTE(review): assumes azlmbr.paths.devroot points at the engine root -- confirm.
TESTDATA_ASSET_PATH = os.path.join(
    azlmbr.paths.devroot, "Gems", "Atom", "TestData", "TestData"
)
def copy_file(src_path, dst_path):
src_path = os.path.join(TESTDATA_ASSET_PATH, src_path)
dst_path = os.path.join(TESTDATA_ASSET_PATH, dst_path)
dst_dir = | |
<gh_stars>10-100
"""
GST contraction algorithms
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import warnings as _warnings
import numpy as _np
from pygsti import baseobjs as _baseobjs
from pygsti import optimize as _opt
from pygsti import tools as _tools
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import povms as _povm
def contract(model, to_what, dataset=None, maxiter=1000000, tol=0.01, use_direct_cp=True, method="Nelder-Mead",
verbosity=0):
"""
Contract a Model to a specified space.
All contraction operations except 'vSPAM' operate entirely on the gate
matrices and leave state preparations and measurments alone, while 'vSPAM'
operations only on SPAM.
Parameters
----------
model : Model
The model to contract
to_what : string
Specifies which space is the model is contracted to.
Allowed values are:
- 'TP' -- All gates are manifestly trace-preserving maps.
- 'CP' -- All gates are manifestly completely-positive maps.
- 'CPTP' -- All gates are manifestly completely-positive and trace-preserving maps.
- 'XP' -- All gates are manifestly "experimentally-positive" maps.
- 'XPTP' -- All gates are manifestly "experimentally-positive" and trace-preserving maps.
- 'vSPAM' -- state preparation and measurement operations are valid.
- 'nothing' -- no contraction is performed.
dataset : DataSet, optional
Dataset to use to determine whether a model is in the
"experimentally-positive" (XP) space. Required only when
contracting to XP or XPTP.
maxiter : int, optional
Maximum number of iterations for iterative contraction routines.
tol : float, optional
Tolerance for iterative contraction routines.
use_direct_cp : bool, optional
Whether to use a faster direct-contraction method for CP
contraction. This method essentially transforms to the
Choi matrix, truncates any negative eigenvalues to zero,
then transforms back to a operation matrix.
method : string, optional
The method used when contracting to XP and non-directly to CP
(i.e. use_direct_cp == False).
verbosity : int, optional
How much detail to send to stdout.
Returns
-------
Model
The contracted model
"""
printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)
if to_what == 'CPTP':
if use_direct_cp:
_, contractedModel = _contract_to_cp_direct(model, printer, tp_also=True, maxiter=maxiter)
else:
_, contractedModel = _contract_to_tp(model, verbosity)
_, contractedModel = _contract_to_cp(contractedModel, printer, method, maxiter, tol)
elif to_what == 'XPTP':
if dataset is None: raise ValueError("dataset must be given to contract to " + to_what)
_, contractedModel = _contract_to_tp(model, verbosity)
_, contractedModel = _contract_to_xp(contractedModel, dataset, verbosity, method, maxiter, tol)
elif to_what == 'CP':
if use_direct_cp:
_, contractedModel = _contract_to_cp_direct(model, printer, tp_also=False, maxiter=maxiter)
else:
_, contractedModel = _contract_to_cp(model, printer, method, maxiter, tol)
elif to_what == 'TP':
_, contractedModel = _contract_to_tp(model, verbosity)
elif to_what == 'XP':
if dataset is None: raise ValueError("dataset must be given to contract to " + to_what)
_, contractedModel = _contract_to_xp(model, dataset, verbosity, method, maxiter, tol)
elif to_what == 'vSPAM':
contractedModel = _contract_to_valid_spam(model, printer)
elif to_what == 'nothing':
contractedModel = model.copy()
else: raise ValueError("Invalid contract argument: %s" % to_what)
return contractedModel
#modifies gates only (not rhoVecs or EVecs = SPAM)
def _contract_to_xp(model, dataset, verbosity, method='Nelder-Mead',
                    maxiter=100000, tol=1e-10):
    """
    Contract the gates of `model` into the "experimentally-positive" (XP)
    space by numerical minimization.  SPAM operations are left untouched.
    Returns (distance_to_legal_point, contracted_model).
    """
    CLIFF = 10000  # penalty cliff added whenever the model lies outside XP
    printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)
    printer.log("--- Contract to XP ---", 1)
    working = model.copy()  # repeatedly overwritten with candidate parameter vectors

    def _objective(vector_gs):
        # Distance from the original model, plus a cliff penalty while the
        # dataset still contains forbidden probabilities (i.e. outside XP).
        working.from_vector(vector_gs)
        penalty = _tools.forbidden_prob(working, dataset)
        cliff_term = CLIFF + penalty if penalty > 1e-10 else 0
        return cliff_term + working.frobeniusdist(model)

    log_to_stdout = (printer.verbosity > 2 and printer.filename is None)
    progress_cb = _opt.create_objfn_printer(_objective)  # only ever prints to stdout!

    if _objective(working.to_vector()) < 1e-8:
        printer.log('Already in XP - no contraction necessary', 1)
        return 0.0, working

    solution = _opt.minimize(_objective, working.to_vector(),
                             method=method, tol=tol, maxiter=maxiter,
                             callback=progress_cb if log_to_stdout else None)
    working.from_vector(solution.x)
    if solution.fun >= CLIFF:
        _warnings.warn("Failed to contract model to XP")
    printer.log('The closest legal point found was distance: ' + str(solution.fun), 1)
    return solution.fun, working
#modifies gates only (not rhoVecs or EVecs = SPAM)
def _contract_to_cp(model, verbosity, method='Nelder-Mead',
                    maxiter=100000, tol=1e-2):
    """Contract `model` onto the set of completely-positive (CP) models by
    minimizing Frobenius distance to the original model plus a large cliff
    penalty proportional to the negative Choi eigenvalues.

    Only gate parameters are varied (not SPAM).  Returns (distance, model).
    """
    PENALTY_CLIFF = 10000
    printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)
    printer.log("--- Contract to CP ---", 1)
    working = model.copy()  # scratch model, repeatedly overwritten via from_vector
    basis = working.basis

    def _objective_func(vector_gs):
        working.from_vector(vector_gs)
        working.basis = basis  # restore basis for the Jamiolkowski isomorphism
        penalty = 1000 * _tools.sum_of_negative_choi_eigenvalues(working)
        cliff_term = PENALTY_CLIFF + penalty if penalty > 1e-10 else 0
        return cliff_term + working.frobeniusdist(model)

    stream_to_stdout = (printer.verbosity > 2 and printer.filename is None)
    progress_cb = _opt.create_objfn_printer(_objective_func)  # writes to stdout only
    if _objective_func(working.to_vector()) < 1e-8:
        printer.log('Already in CP - no contraction necessary', 1)
        return 0.0, working
    solution = _opt.minimize(_objective_func, working.to_vector(),
                             method=method, tol=tol, maxiter=maxiter,
                             callback=progress_cb if stream_to_stdout else None)
    working.from_vector(solution.x)
    if solution.fun >= PENALTY_CLIFF:
        _warnings.warn("Failed to contract model to CP")
    printer.log('The closest legal point found was distance: ' + str(solution.fun), 1)
    return solution.fun, working
#modifies gates only (not rhoVecs or EVecs = SPAM)
def _contract_to_cp_direct(model, verbosity, tp_also=False, maxiter=100000, tol=1e-8):
printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)
mdl = model.copy() # working copy that we keep overwriting with vectorized data
printer.log(("--- Contract to %s (direct) ---" % ("CPTP" if tp_also else "CP")), 1)
for (opLabel, gate) in model.operations.items():
new_op = gate.copy()
if(tp_also):
for k in range(new_op.shape[1]): new_op[0, k] = 1.0 if k == 0 else 0.0
Jmx = _tools.jamiolkowski_iso(new_op, op_mx_basis=mdl.basis, choi_mx_basis="gm")
evals, evecs = _np.linalg.eig(Jmx)
if tp_also:
assert(abs(sum(evals) - 1.0) < 1e-8) # check that Jmx always has trace == 1
#if abs( sum(evals) - 1.0 ) >= 1e-8: #DEBUG
# print "WARNING: JMx given with evals = %s (sum = %s != 1)" % (evals,sum(evals))
# print "WARNING: JMx from: "; _tools.print_mx(new_op)
it = 0
while min(evals) < -tol or abs(sum(evals) - 1.0) >= tol:
#Project eigenvalues to being all positive
new_evals = evals[:]
#New projection code
# don't need .real in theory, but small im parts can snowball in practice
new_evals = [max(ev.real, 0) for ev in new_evals]
# amount (usually/always < 0) needed to add to eigenvalues to make sum == 1
total_shift = 1.0 - sum(new_evals)
# (index,eval) tuples sorted by eval
sorted_evals_with_inds = sorted(enumerate(new_evals), key=lambda x: x[1])
shift_left = total_shift
evals_left = len(sorted_evals_with_inds)
ideal_shift = shift_left / evals_left
for (i, sorted_eval) in sorted_evals_with_inds:
# loop over new_evals from smallest to largest (note all > 0)
evals_left -= 1 # number of eigenvalue beyond current eval (in sorted order)
if sorted_eval + ideal_shift >= 0:
new_evals[i] = sorted_eval + ideal_shift
shift_left -= ideal_shift
elif evals_left > 0:
new_evals[i] = 0
shift_left += sorted_eval
ideal_shift = shift_left / evals_left # divide remaining shift evenly among remaining eigenvalues
else:
# last eigenvalue would be < 0 with ideal shift and can't set == 0 b/c all others must be zero too
new_evals[i] = 1.0 # so set what was the largest eigenvalue == 1.0
#if abs( sum(new_evals) - 1.0 ) >= 1e-8: #DEBUG
# print "DEBUG: sum(new_evals) == ",sum(new_evals) #DEBUG
# print "DEBUG: new_evals == ",new_evals #DEBUG
# print "DEBUG: orig evals == ",evals #DEBUG
assert(abs(sum(new_evals) - 1.0) < 1e-8)
new_Jmx = _np.dot(evecs, _np.dot(_np.diag(new_evals), _np.linalg.inv(evecs)))
#Make trace preserving by zeroing out real parts of off diagonal blocks and imaginary parts
# within diagaonal 1x1 and 3x3 block (so really just the 3x3 block's off diag elements)
#assert(new_Jmx.shape == (4,4)) #NOTE: only works for 1-qubit case so far
kmax = new_Jmx.shape[0]
for k in range(1, kmax):
new_Jmx[0, k] = 1j * new_Jmx[0, k].imag
new_Jmx[k, 0] = 1j * new_Jmx[k, 0].imag
for i in range(1, kmax):
for j in range(1, kmax):
new_Jmx[i, j] = new_Jmx[i, j].real
evals, evecs = _np.linalg.eig(new_Jmx)
#DEBUG
#EVAL_TOL = 1e-10
#if abs( sum(evals) - 1.0 ) >= 1e-8:
# print "DEBUG2: sum(evals) == ",sum(evals)
# print "DEBUG2: evals == ",evals
#if min(evals) < -EVAL_TOL:
# print "DEBUG3: evals = ",evals
# Check that trace-trunc above didn't mess up positivity
assert(min(evals) | |
from color_trans_gradients_2 import delegator, create_undistorted_hls_image, create_sobel_image
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
# Load the pickled camera calibration (camera matrix and distortion coeffs).
# NOTE(review): the file handles opened inline here are never closed;
# consider `with open(...)` (left unchanged in this doc-only pass).
cameraCalibration = pickle.load(open('serialized_camera_data/camera_calibration.p', 'rb'))
mtx, dist = map(cameraCalibration.get, ('mtx', 'dist'))
# Read every test image from disk, keeping (path, BGR image) pairs.
test_images_with_names = list(map(lambda imageFileName: (imageFileName, cv2.imread(imageFileName)),
                                  glob.glob('./test_images/*.jpg')))
original_image = test_images_with_names[1][1]  # second test image, used as a sample
hls_image = create_undistorted_hls_image(original_image)
# S (saturation) channel of the undistorted HLS image.
create_saturation_channel_images = lambda img: create_undistorted_hls_image(img)[:, :, 2]
# Thresholded Sobel gradient of the S channel, along x (default) and along y.
take_sobel_in_X = lambda img: create_sobel_image(create_saturation_channel_images(img), thresh_min=10, thresh_max=160)
take_sobel_in_Y = lambda img: create_sobel_image(create_saturation_channel_images(img), direction='y', thresh_min=10,
                                                thresh_max=160)
def combine_sobel_gradients(img):
    """Combine the x- and y-direction Sobel binaries of `img`.

    A pixel is set to 1 only when both gradient thresholds fire.
    """
    gradient_x = take_sobel_in_X(img)
    gradient_y = take_sobel_in_Y(img)
    both_fired = np.zeros_like(gradient_x)
    both_fired[(gradient_x == 1) & (gradient_y == 1)] = 1
    return both_fired
# Run the combined-Sobel pipeline over every test image (no display).
combined_sobel_image = delegator(test_images_with_names, combine_sobel_gradients, display_image=False)
# Load the pickled perspective transform and its inverse.
# NOTE(review): file handle opened inline is never closed; consider `with open(...)`.
perspective_matrix = pickle.load(open('serialized_camera_data/perspective_transform.p', 'rb'))
M, Minv = map(perspective_matrix.get, ('M', 'Minv'))
def do_perspective_transformation(image, M=M):
    """Warp `image` with the perspective matrix `M`.

    The output has the same width and height as the input.
    """
    height, width = image.shape[0], image.shape[1]
    return cv2.warpPerspective(image, M, (width, height))
# Combined-Sobel binary warped to the top-down (bird's-eye) view.
do_combine_sobel_transform = lambda img: do_perspective_transformation(combine_sobel_gradients(img))
transformed_binary_images = delegator(test_images_with_names, do_combine_sobel_transform, display_image=False,
                                      cmap='gray')
# Conversions in x and y from pixel space to meters.
ym_per_pix = 30 / 720  # meters per pixel in y dimension
xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
def search_lanes_and_fit_polynomial(image, nwindows=9, margin=110, minpix=50):
    """
    Search lane-line pixels with a sliding-window histogram approach and fit a
    second-order polynomial to each lane.

    Parameters:
        image: BGR road image (warped to bird's-eye view internally).
        nwindows: number of vertical sliding windows per lane.
        margin: half-width of each search window, in pixels.
        minpix: minimum pixels inside a window needed to recenter the next one.

    Returns (left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds,
    right_lane_inds, out_img, nonzerox, nonzeroy); the *_m fits are in meters.
    """
    # get a perspective transformed (bird's-eye) binary image
    binary_warped_image = do_combine_sobel_transform(image)
    # Histogram of the bottom half of the image: its peaks locate the lane bases.
    histogram = np.sum(binary_warped_image[binary_warped_image.shape[0] // 2:, :], axis=0)
    # RGB output image for visualizing windows and colored lane pixels.
    out_img = np.dstack((binary_warped_image, binary_warped_image, binary_warped_image)) * 255
    # Peak of each half of the histogram = starting x of each lane line.
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 -- use floor
    # division / int(), which give identical results for these positive values.
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Height of each sliding window.
    window_height = binary_warped_image.shape[0] // nwindows
    # Coordinates of all nonzero pixels in the warped binary image.
    nonzero = binary_warped_image.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as we walk up the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Per-window lists of pixel indices belonging to each lane.
    left_lane_inds = []
    right_lane_inds = []
    # Walk the windows from the bottom of the image to the top.
    for window in range(nwindows):
        # Window boundaries in y (shared) and x (per lane).
        y_low_coordinate = binary_warped_image.shape[0] - (window + 1) * window_height
        y_high_coordinate = binary_warped_image.shape[0] - window * window_height
        x_left_low = leftx_current - margin
        x_left_high = leftx_current + margin
        x_right_low = rightx_current - margin
        x_right_high = rightx_current + margin
        # Draw the current pair of search windows on the visualization image.
        cv2.rectangle(out_img, (x_left_low, y_low_coordinate), (x_left_high, y_high_coordinate), (0, 255, 0), 2)
        cv2.rectangle(out_img, (x_right_low, y_low_coordinate), (x_right_high, y_high_coordinate), (0, 255, 0), 2)
        # Indices of the nonzero pixels that fall inside each window.
        good_left_inds = ((nonzeroy >= y_low_coordinate) & (nonzeroy < y_high_coordinate) & (nonzerox >= x_left_low) & (
            nonzerox < x_left_high)).nonzero()[0]
        good_right_inds = \
            ((nonzeroy >= y_low_coordinate) & (nonzeroy < y_high_coordinate) & (nonzerox >= x_right_low) & (
                nonzerox < x_right_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If we found > minpix pixels, recenter the next window on their mean x.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Flatten the per-window index lists.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Pixel positions of each lane line.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Second-order polynomial fits in pixel space...
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # ...and in real-world (meter) space.
    left_fit_m = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_m = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
    return (left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy)
def draw_windows_and_fitted_lines(image, ax):
    """Render the sliding windows, lane pixels, and fitted curves onto `ax`.

    Delegates the search/fit to search_lanes_and_fit_polynomial and returns
    (left_fit, right_fit, left_fit_m, right_fit_m).
    """
    (left_fit, right_fit, left_fit_m, right_fit_m,
     left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy) = search_lanes_and_fit_polynomial(image)
    ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])

    def _poly_x(fit):
        # Evaluate a second-order polynomial x = f(y) over all rows.
        return fit[0] * ploty ** 2 + fit[1] * ploty + fit[2]

    left_fitx = _poly_x(left_fit)
    right_fitx = _poly_x(right_fit)
    # Red left-lane pixels, blue right-lane pixels.
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    ax.imshow(out_img)
    # Overlay the fitted curves in yellow.
    ax.plot(left_fitx, ploty, color='yellow')
    ax.plot(right_fitx, ploty, color='yellow')
    return (left_fit, right_fit, left_fit_m, right_fit_m)
def draw_lane_lines_on_all_images(images, cols=2, rows=3, figsize=(15, 13)):
    """Draw windows and fitted lane lines for each image on a rows x cols grid.

    Returns a list of (image_path, left_fit, right_fit, left_fit_m, right_fit_m)
    tuples, one per rendered image.
    """
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    collected = []
    for index, ax in enumerate(axes.flat):
        if index >= cols * rows:
            break  # no more grid cells requested
        if index >= len(images):
            continue  # empty grid cell: nothing to draw
        image_path, image = images[index]
        left_fit, right_fit, left_fit_m, right_fit_m = draw_windows_and_fitted_lines(image, ax)
        ax.set_title(image_path)
        ax.axis('off')
        collected.append((image_path, left_fit, right_fit, left_fit_m, right_fit_m))
    fig.show()
    return collected
# Draw the search windows and fitted polynomials on all test images.
images_top_view_with_curve = draw_lane_lines_on_all_images(test_images_with_names)
print('done')
# Here we calculate the lane curvature.
def find_radius_of_curvature(yRange, left_fit_cr):
    """Return the radius of curvature of a second-order polynomial fit.

    `yRange` is the pixel row at which to evaluate; `left_fit_cr` is a fit in
    real-world units (the y coordinate is scaled by the module-level
    ym_per_pix).
    """
    quadratic_coeff, linear_coeff = left_fit_cr[0], left_fit_cr[1]
    slope = 2 * quadratic_coeff * yRange * ym_per_pix + linear_coeff
    return (1 + slope ** 2) ** 1.5 / np.absolute(2 * quadratic_coeff)
# Report the curvature of both fitted lanes for every test image.
for image_polygon in images_top_view_with_curve:
    image_path, left_fit, right_fit, left_fit_m, right_fit_m = image_polygon
    max_Y = 719  # bottom row of a 720-px-high frame (curvature measured at the car)
    # converting from meters to kilometers
    leftCurvature = find_radius_of_curvature(max_Y, left_fit_m) / 1000
    rightCurvature = find_radius_of_curvature(max_Y, right_fit_m) / 1000
    print('Image : {}, Left : {:.2f} km, Right : {:.2f} km'.format(image_path, leftCurvature, rightCurvature))
# Warp the lane boundaries on top of image
def fill_the_lane_area(img, left_fit, right_fit):
    """Paint the area between the two fitted lane polynomials back onto `img`.

    The polygon is drawn in warped (top-down) space, unwarped with the
    module-level inverse matrix Minv, and alpha-blended over the input frame.
    """
    height = img.shape[0]
    ploty = np.linspace(0, height - 1, height)
    overlay = np.zeros_like(img).astype(np.uint8)

    def _poly_x(fit):
        # Evaluate x = f(y) for a second-order polynomial fit.
        return fit[0] * ploty ** 2 + fit[1] * ploty + fit[2]

    left_fitx = _poly_x(left_fit)
    right_fitx = _poly_x(right_fit)
    # fillPoly wants one closed contour: left edge top-to-bottom, then the
    # right edge reversed (bottom-to-top).
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    polygon = np.hstack((pts_left, pts_right))
    # Fill the lane area (green) on the blank warped canvas.
    cv2.fillPoly(overlay, np.int_([polygon]), (0, 255, 0))
    # Back to camera perspective, then blend with the source frame.
    unwarped = cv2.warpPerspective(overlay, Minv, (img.shape[1], img.shape[0]))
    return cv2.addWeighted(img, 1, unwarped, 0.3, 0)
def draw_lanes_visualization(img):
    """Detect the lane lines in `img`, fill the lane area, and return the
    blended result converted to RGB for display."""
    fit_data = search_lanes_and_fit_polynomial(img)
    left_fit, right_fit = fit_data[0], fit_data[1]
    overlaid = fill_the_lane_area(img, left_fit, right_fit)
    return cv2.cvtColor(overlaid, cv2.COLOR_BGR2RGB)
# Fill the detected lane area on all test images.
images_with_lane_area = delegator(test_images_with_names, draw_lanes_visualization)
print('done')
# car position
def find_car_position_and_show_output(img, fontScale=2):
"""
This Fn find the car position and reflect the output on image
"""
left_fit, right_fit, left_fit_m, right_fit_m, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy = search_lanes_and_fit_polynomial(
img)
output = fill_the_lane_area(img, left_fit, right_fit)
# Calculate curvature
left_curvature = find_radius_of_curvature(max_Y, left_fit_m)
right_curvature = find_radius_of_curvature(max_Y, right_fit_m)
# find car position
# maximum distance along x-axis in meters
x_max_mtrs = img.shape[1] * xm_per_pix
# maximum distance along y-axis in meters
y_max_mtrs = img.shape[0] * ym_per_pix
car_center_pt = x_max_mtrs / 2
# left line position in meters from left
left_line = left_fit_m[0] * y_max_mtrs ** 2 + left_fit_m[1] * y_max_mtrs + left_fit_m[2]
# right line position in meters from left
right_line = right_fit_m[0] * y_max_mtrs ** 2 + right_fit_m[1] * y_max_mtrs + right_fit_m[2]
# middle_point on center which is a refrence point to calculate the car distance
middle_line = left_line + (right_line - left_line) / 2
# car position from center
car_distance_from_center = middle_line - car_center_pt
if car_distance_from_center > 0:
message = '{:.2f} m right'.format(car_distance_from_center)
else:
message = '{:.2f} m left'.format(-car_distance_from_center)
# Writing the text on image
font = cv2.FONT_HERSHEY_SIMPLEX
font_color = (255, 255, 255)
cv2.putText(output, 'Left curvature: {:.0f} | |
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List, Any
class GetConferenceDetailHeaders(TeaModel):
    """Request headers for the GetConferenceDetail API."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # extra HTTP headers passed through as-is
        self.common_headers = common_headers
        # DingTalk access token ('x-acs-dingtalk-access-token' header)
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class GetConferenceDetailResponseBodyMemberList(TeaModel):
    """One attendee entry in the GetConferenceDetail response."""
    def __init__(
        self,
        union_id: str = None,
        name: str = None,
        attend_duration: float = None,
        staff_id: str = None,
    ):
        # user unionId
        self.union_id = union_id
        # user nickname
        self.name = name
        # attendance duration
        self.attend_duration = attend_duration
        # staff id
        self.staff_id = staff_id
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.union_id is not None:
            result['unionId'] = self.union_id
        if self.name is not None:
            result['name'] = self.name
        if self.attend_duration is not None:
            result['attendDuration'] = self.attend_duration
        if self.staff_id is not None:
            result['staffId'] = self.staff_id
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('attendDuration') is not None:
            self.attend_duration = m.get('attendDuration')
        if m.get('staffId') is not None:
            self.staff_id = m.get('staffId')
        return self
class GetConferenceDetailResponseBody(TeaModel):
    """Response body of GetConferenceDetail: one conference plus its attendees."""
    def __init__(
        self,
        conference_id: str = None,
        title: str = None,
        conf_start_time: float = None,
        duration: float = None,
        total_num: int = None,
        attendee_num: int = None,
        attendee_percentage: str = None,
        caller_id: str = None,
        caller_name: str = None,
        member_list: List[GetConferenceDetailResponseBodyMemberList] = None,
    ):
        # conference id
        self.conference_id = conference_id
        # conference title
        self.title = title
        # start time
        self.conf_start_time = conf_start_time
        # duration
        self.duration = duration
        # total number of participants
        self.total_num = total_num
        # number of actual attendees
        self.attendee_num = attendee_num
        # attendance rate
        self.attendee_percentage = attendee_percentage
        # caller (initiator) unionId
        self.caller_id = caller_id
        # caller (initiator) nickname
        self.caller_name = caller_name
        # list of attendees
        self.member_list = member_list
    def validate(self):
        # recursively validate nested member models
        if self.member_list:
            for k in self.member_list:
                if k:
                    k.validate()
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) scalar fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.conference_id is not None:
            result['conferenceId'] = self.conference_id
        if self.title is not None:
            result['title'] = self.title
        if self.conf_start_time is not None:
            result['confStartTime'] = self.conf_start_time
        if self.duration is not None:
            result['duration'] = self.duration
        if self.total_num is not None:
            result['totalNum'] = self.total_num
        if self.attendee_num is not None:
            result['attendeeNum'] = self.attendee_num
        if self.attendee_percentage is not None:
            result['attendeePercentage'] = self.attendee_percentage
        if self.caller_id is not None:
            result['callerId'] = self.caller_id
        if self.caller_name is not None:
            result['callerName'] = self.caller_name
        result['memberList'] = []
        if self.member_list is not None:
            for k in self.member_list:
                result['memberList'].append(k.to_map() if k else None)
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('conferenceId') is not None:
            self.conference_id = m.get('conferenceId')
        if m.get('title') is not None:
            self.title = m.get('title')
        if m.get('confStartTime') is not None:
            self.conf_start_time = m.get('confStartTime')
        if m.get('duration') is not None:
            self.duration = m.get('duration')
        if m.get('totalNum') is not None:
            self.total_num = m.get('totalNum')
        if m.get('attendeeNum') is not None:
            self.attendee_num = m.get('attendeeNum')
        if m.get('attendeePercentage') is not None:
            self.attendee_percentage = m.get('attendeePercentage')
        if m.get('callerId') is not None:
            self.caller_id = m.get('callerId')
        if m.get('callerName') is not None:
            self.caller_name = m.get('callerName')
        self.member_list = []
        if m.get('memberList') is not None:
            for k in m.get('memberList'):
                temp_model = GetConferenceDetailResponseBodyMemberList()
                self.member_list.append(temp_model.from_map(k))
        return self
class GetConferenceDetailResponse(TeaModel):
    """Full GetConferenceDetail response: HTTP headers plus parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetConferenceDetailResponseBody = None,
    ):
        self.headers = headers
        self.body = body
    def validate(self):
        # both headers and body are required on a response
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            temp_model = GetConferenceDetailResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class GetUserAppVersionSummaryHeaders(TeaModel):
    """Request headers for the GetUserAppVersionSummary API."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # extra HTTP headers passed through as-is
        self.common_headers = common_headers
        # DingTalk access token ('x-acs-dingtalk-access-token' header)
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class GetUserAppVersionSummaryRequest(TeaModel):
    """Paged request parameters for the GetUserAppVersionSummary API."""
    def __init__(
        self,
        next_token: int = None,
        max_results: int = None,
    ):
        # starting pagination cursor
        self.next_token = next_token
        # number of records per page
        self.max_results = max_results
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.next_token is not None:
            result['nextToken'] = self.next_token
        if self.max_results is not None:
            result['maxResults'] = self.max_results
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        if m.get('maxResults') is not None:
            self.max_results = m.get('maxResults')
        return self
class GetUserAppVersionSummaryResponseBodyData(TeaModel):
    """One per-version user-count record in the app-version summary."""
    def __init__(
        self,
        stat_date: str = None,
        org_name: str = None,
        client: str = None,
        app_version: str = None,
        user_cnt: float = None,
    ):
        # statistics date
        self.stat_date = stat_date
        # organization name
        self.org_name = org_name
        # client (platform) information
        self.client = client
        # app version information
        self.app_version = app_version
        # user count
        self.user_cnt = user_cnt
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.stat_date is not None:
            result['statDate'] = self.stat_date
        if self.org_name is not None:
            result['orgName'] = self.org_name
        if self.client is not None:
            result['client'] = self.client
        if self.app_version is not None:
            result['appVersion'] = self.app_version
        if self.user_cnt is not None:
            result['userCnt'] = self.user_cnt
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('statDate') is not None:
            self.stat_date = m.get('statDate')
        if m.get('orgName') is not None:
            self.org_name = m.get('orgName')
        if m.get('client') is not None:
            self.client = m.get('client')
        if m.get('appVersion') is not None:
            self.app_version = m.get('appVersion')
        if m.get('userCnt') is not None:
            self.user_cnt = m.get('userCnt')
        return self
class GetUserAppVersionSummaryResponseBody(TeaModel):
    """Paged response body of GetUserAppVersionSummary."""
    def __init__(
        self,
        data: List[GetUserAppVersionSummaryResponseBodyData] = None,
        next_token: int = None,
        has_more: bool = None,
    ):
        # per-version user distribution records
        self.data = data
        # pagination cursor for the next request
        self.next_token = next_token
        # whether more data is available
        self.has_more = has_more
    def validate(self):
        # recursively validate nested data records
        if self.data:
            for k in self.data:
                if k:
                    k.validate()
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) scalar fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        result['data'] = []
        if self.data is not None:
            for k in self.data:
                result['data'].append(k.to_map() if k else None)
        if self.next_token is not None:
            result['nextToken'] = self.next_token
        if self.has_more is not None:
            result['hasMore'] = self.has_more
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        self.data = []
        if m.get('data') is not None:
            for k in m.get('data'):
                temp_model = GetUserAppVersionSummaryResponseBodyData()
                self.data.append(temp_model.from_map(k))
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        if m.get('hasMore') is not None:
            self.has_more = m.get('hasMore')
        return self
class GetUserAppVersionSummaryResponse(TeaModel):
    """Full GetUserAppVersionSummary response: HTTP headers plus parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetUserAppVersionSummaryResponseBody = None,
    ):
        self.headers = headers
        self.body = body
    def validate(self):
        # both headers and body are required on a response
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a plain dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            temp_model = GetUserAppVersionSummaryResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class DeleteCommentHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.