input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
OpCode, field: _n_4_t_12):...
def Emit(self, opcode: OpCode, labels: _n_0_t_1[Label]):...
def Emit(self, opcode: OpCode, label: Label):...
def Emit(self, opcode: OpCode, arg: float):...
def Emit(self, opcode: OpCode, cls: _n_0_t_6):...
def Emit(self, opcode: OpCode, con: _n_4_t_6):...
def Emit(self, opcode: OpCode, signature: SignatureHelper):...
def Emit(self, opcode: OpCode, meth: _n_4_t_7):...
def Emit(self, opcode: OpCode, arg: int):...
def Emit(self, opcode: OpCode, arg: _n_0_t_12):...
def Emit(self, opcode: OpCode, arg: _n_0_t_0):...
def Emit(self, opcode: OpCode):...
def EmitCall(self, opcode: OpCode, methodInfo: _n_4_t_7, optionalParameterTypes: _n_0_t_1[_n_0_t_6]):...
def EmitCalli(self, opcode: OpCode, unmanagedCallConv: _n_6_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6]):...
def EmitCalli(self, opcode: OpCode, callingConvention: _n_4_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6], optionalParameterTypes: _n_0_t_1[_n_0_t_6]):...
def EmitWriteLine(self, fld: _n_4_t_12):...
def EmitWriteLine(self, localBuilder: LocalBuilder):...
def EmitWriteLine(self, value: str):...
def EndExceptionBlock(self):...
def EndScope(self):...
def MarkLabel(self, loc: Label):...
def MarkSequencePoint(self, document: _n_2_t_0, startLine: int, startColumn: int, endLine: int, endColumn: int):...
def ThrowException(self, excType: _n_0_t_6):...
def UsingNamespace(self, usingNamespace: str):...
class Label(_n_0_t_10):
pass
class LocalBuilder(_n_4_t_20, _n_6_t_15):
def SetLocalSymInfo(self, name: str, startOffset: int, endOffset: int):...
def SetLocalSymInfo(self, name: str):...
class MethodBuilder(_n_4_t_7, _n_4_t_1, _n_6_t_2, _n_6_t_3, _n_6_t_7, _n_6_t_16):
@property
def InitLocals(self) -> bool:"""InitLocals { get; set; } -> bool"""
@property
def Signature(self) -> str:"""Signature { get; } -> str"""
def AddDeclarativeSecurity(self, action: _n_9_t_0, pset: _n_8_t_1):...
def CreateMethodBody(self, il: _n_0_t_1[_n_0_t_0], count: int):...
def DefineGenericParameters(self, names: _n_0_t_1[str]) -> _n_0_t_1[GenericTypeParameterBuilder]:...
def DefineParameter(self, position: int, attributes: _n_4_t_8, strParamName: str) -> ParameterBuilder:...
def GetILGenerator(self, size: int) -> ILGenerator:...
def GetILGenerator(self) -> ILGenerator:...
def GetModule(self) -> _n_4_t_9:...
def GetToken(self) -> MethodToken:...
def SetCustomAttribute(self, customBuilder: CustomAttributeBuilder):...
def SetCustomAttribute(self, con: _n_4_t_6, binaryAttribute: _n_0_t_1[_n_0_t_0]):...
def SetImplementationFlags(self, attributes: _n_4_t_10):...
def SetMarshal(self, unmanagedMarshal: UnmanagedMarshal):...
def SetMethodBody(self, il: _n_0_t_1[_n_0_t_0], maxStack: int, localSignature: _n_0_t_1[_n_0_t_0], exceptionHandlers: _n_1_t_0[ExceptionHandler], tokenFixups: _n_1_t_0[int]):...
def SetParameters(self, parameterTypes: _n_0_t_1[_n_0_t_6]):...
def SetReturnType(self, returnType: _n_0_t_6):...
def SetSignature(self, returnType: _n_0_t_6, returnTypeRequiredCustomModifiers: _n_0_t_1[_n_0_t_6], returnTypeOptionalCustomModifiers: _n_0_t_1[_n_0_t_6], parameterTypes: _n_0_t_1[_n_0_t_6], parameterTypeRequiredCustomModifiers: _n_0_t_1[_n_0_t_1[_n_0_t_6]], parameterTypeOptionalCustomModifiers: _n_0_t_1[_n_0_t_1[_n_0_t_6]]):...
def SetSymCustomAttribute(self, name: str, data: _n_0_t_1[_n_0_t_0]):...
class MethodRental(_n_6_t_17):
JitImmediate: int
JitOnDemand: int
@staticmethod
def SwapMethodBody(cls: _n_0_t_6, methodtoken: int, rgIL: _n_0_t_13, methodSize: int, flags: int):...
class MethodToken(_n_0_t_10):
Empty: int
@property
def Token(self) -> int:"""Token { get; } -> int"""
class ModuleBuilder(_n_4_t_9, _n_6_t_18, _n_7_t_0, _n_4_t_1, _n_6_t_19):
def CreateGlobalFunctions(self):...
def DefineDocument(self, url: str, language: _n_0_t_14, languageVendor: _n_0_t_14, documentType: _n_0_t_14) -> _n_2_t_0:...
def DefineEnum(self, name: str, visibility: _n_4_t_21, underlyingType: _n_0_t_6) -> EnumBuilder:...
def DefineGlobalMethod(self, name: str, attributes: _n_4_t_13, callingConvention: _n_4_t_14, returnType: _n_0_t_6, requiredReturnTypeCustomModifiers: _n_0_t_1[_n_0_t_6], optionalReturnTypeCustomModifiers: _n_0_t_1[_n_0_t_6], parameterTypes: _n_0_t_1[_n_0_t_6], requiredParameterTypeCustomModifiers: _n_0_t_1[_n_0_t_1[_n_0_t_6]], optionalParameterTypeCustomModifiers: _n_0_t_1[_n_0_t_1[_n_0_t_6]]) -> MethodBuilder:...
def DefineGlobalMethod(self, name: str, attributes: _n_4_t_13, callingConvention: _n_4_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6]) -> MethodBuilder:...
def DefineGlobalMethod(self, name: str, attributes: _n_4_t_13, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6]) -> MethodBuilder:...
def DefineInitializedData(self, name: str, data: _n_0_t_1[_n_0_t_0], attributes: _n_4_t_22) -> FieldBuilder:...
def DefineManifestResource(self, name: str, stream: _n_3_t_0, attribute: _n_4_t_2):...
def DefinePInvokeMethod(self, name: str, dllName: str, entryName: str, attributes: _n_4_t_13, callingConvention: _n_4_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6], nativeCallConv: _n_6_t_14, nativeCharSet: _n_6_t_20) -> MethodBuilder:...
def DefinePInvokeMethod(self, name: str, dllName: str, attributes: _n_4_t_13, callingConvention: _n_4_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6], nativeCallConv: _n_6_t_14, nativeCharSet: _n_6_t_20) -> MethodBuilder:...
def DefineResource(self, name: str, description: str, attribute: _n_4_t_2) -> _n_5_t_0:...
def DefineResource(self, name: str, description: str) -> _n_5_t_0:...
def DefineType(self, name: str, attr: _n_4_t_21, parent: _n_0_t_6, packsize: PackingSize) -> TypeBuilder:...
def DefineType(self, name: str, attr: _n_4_t_21, parent: _n_0_t_6, packingSize: PackingSize, typesize: int) -> TypeBuilder:...
def DefineType(self, name: str, attr: _n_4_t_21, parent: _n_0_t_6, typesize: int) -> TypeBuilder:...
def DefineType(self, name: str, attr: _n_4_t_21, parent: _n_0_t_6) -> TypeBuilder:...
def DefineType(self, name: str, attr: _n_4_t_21) -> TypeBuilder:...
def DefineType(self, name: str) -> TypeBuilder:...
def DefineType(self, name: str, attr: _n_4_t_21, parent: _n_0_t_6, interfaces: _n_0_t_1[_n_0_t_6]) -> TypeBuilder:...
def DefineUninitializedData(self, name: str, size: int, attributes: _n_4_t_22) -> FieldBuilder:...
def DefineUnmanagedResource(self, resourceFileName: str):...
def DefineUnmanagedResource(self, resource: _n_0_t_1[_n_0_t_0]):...
def GetArrayMethod(self, arrayClass: _n_0_t_6, methodName: str, callingConvention: _n_4_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6]) -> _n_4_t_7:...
def GetArrayMethodToken(self, arrayClass: _n_0_t_6, methodName: str, callingConvention: _n_4_t_14, returnType: _n_0_t_6, parameterTypes: _n_0_t_1[_n_0_t_6]) -> MethodToken:...
def GetConstructorToken(self, con: _n_4_t_6) -> MethodToken:...
def GetConstructorToken(self, constructor: _n_4_t_6, optionalParameterTypes: _n_1_t_0[_n_0_t_6]) -> MethodToken:...
def GetFieldToken(self, field: _n_4_t_12) -> FieldToken:...
def GetMethodToken(self, method: _n_4_t_7, optionalParameterTypes: _n_1_t_0[_n_0_t_6]) -> MethodToken:...
def GetMethodToken(self, method: _n_4_t_7) -> MethodToken:...
def GetSignatureToken(self, sigBytes: _n_0_t_1[_n_0_t_0], sigLength: int) -> SignatureToken:...
def GetSignatureToken(self, sigHelper: SignatureHelper) -> SignatureToken:...
def GetStringConstant(self, str: str) -> StringToken:...
def GetSymWriter(self) -> _n_2_t_1:...
def GetTypeToken(self, name: str) -> TypeToken:...
def GetTypeToken(self, type: _n_0_t_6) -> TypeToken:...
def IsTransient(self) -> bool:...
def SetCustomAttribute(self, customBuilder: CustomAttributeBuilder):...
def SetCustomAttribute(self, con: _n_4_t_6, binaryAttribute: _n_0_t_1[_n_0_t_0]):...
def SetSymCustomAttribute(self, name: str, data: _n_0_t_1[_n_0_t_0]):...
def SetUserEntryPoint(self, entryPoint: _n_4_t_7):...
class OpCode(_n_0_t_10):
@property
def FlowControl(self) -> FlowControl:"""FlowControl { get; } -> FlowControl"""
@property
def Name(self) -> str:"""Name { get; } -> str"""
@property
def OpCodeType(self) -> OpCodeType:"""OpCodeType { get; } -> OpCodeType"""
@property
def OperandType(self) -> OperandType:"""OperandType { get; } -> OperandType"""
@property
def Size(self) -> int:"""Size { get; } -> int"""
@property
def StackBehaviourPop(self) -> StackBehaviour:"""StackBehaviourPop { get; } -> StackBehaviour"""
@property
def StackBehaviourPush(self) -> StackBehaviour:"""StackBehaviourPush { get; } -> StackBehaviour"""
@property
def Value(self) -> int:"""Value { get; } -> int"""
class OpCodes(object):
Add: int
Add_Ovf: int
Add_Ovf_Un: int
And: int
Arglist: int
Beq: int
Beq_S: int
Bge: int
Bge_S: int
Bge_Un: int
Bge_Un_S: int
Bgt: int
Bgt_S: int
Bgt_Un: int
Bgt_Un_S: int
Ble: int
Ble_S: int
Ble_Un: int
Ble_Un_S: int
Blt: int
Blt_S: int
Blt_Un: int
Blt_Un_S: int
Bne_Un: int
Bne_Un_S: int
Box: int
Br: int
Br_S: int
Break: int
Brfalse: int
Brfalse_S: int
Brtrue: int
Brtrue_S: int
Call: int
Calli: int
Callvirt: int
Castclass: int
Ceq: int
Cgt: int
Cgt_Un: int
Ckfinite: int
Clt: int
Clt_Un: int
Constrained: int
Conv_I: int
Conv_I1: int
Conv_I2: int
Conv_I4: int
Conv_I8: int
Conv_Ovf_I: int
Conv_Ovf_I_Un: int
Conv_Ovf_I1: int
Conv_Ovf_I1_Un: int
Conv_Ovf_I2: int
Conv_Ovf_I2_Un: int
Conv_Ovf_I4: int
Conv_Ovf_I4_Un: int
Conv_Ovf_I8: int
Conv_Ovf_I8_Un: int
Conv_Ovf_U: int
Conv_Ovf_U_Un: int
Conv_Ovf_U1: int
Conv_Ovf_U1_Un: int
Conv_Ovf_U2: int
Conv_Ovf_U2_Un: int
Conv_Ovf_U4: int
Conv_Ovf_U4_Un: int
Conv_Ovf_U8: int
Conv_Ovf_U8_Un: int
Conv_R_Un: int
Conv_R4: int
Conv_R8: int
Conv_U: int
Conv_U1: int
Conv_U2: int
Conv_U4: int
Conv_U8: int
Cpblk: int
Cpobj: int
Div: int
Div_Un: int
Dup: int
Endfilter: int
Endfinally: int
Initblk: int
Initobj: int
Isinst: int
Jmp: int
Ldarg: int
Ldarg_0: int
Ldarg_1: int
Ldarg_2: int
Ldarg_3: int
Ldarg_S: int
Ldarga: int
Ldarga_S: int
Ldc_I4: int
Ldc_I4_0: int
Ldc_I4_1: int
Ldc_I4_2: int
Ldc_I4_3: int
Ldc_I4_4: int
Ldc_I4_5: int
Ldc_I4_6: int
Ldc_I4_7: int
Ldc_I4_8: int
Ldc_I4_M1: int
Ldc_I4_S: int
Ldc_I8: int
Ldc_R4: int
Ldc_R8: int
Ldelem: int
Ldelem_I: int
Ldelem_I1: int
Ldelem_I2: int
Ldelem_I4: int
Ldelem_I8: int
Ldelem_R4: int
Ldelem_R8: int
Ldelem_Ref: int
Ldelem_U1: int
Ldelem_U2: int
Ldelem_U4: int
Ldelema: int
Ldfld: int
Ldflda: int
Ldftn: int
Ldind_I: int
Ldind_I1: int
Ldind_I2: int
Ldind_I4: int
Ldind_I8: int
Ldind_R4: int
Ldind_R8: int
Ldind_Ref: int
Ldind_U1: int
Ldind_U2: int
Ldind_U4: int
Ldlen: int
Ldloc: int
Ldloc_0: int
Ldloc_1: int
Ldloc_2: int
Ldloc_3: int
Ldloc_S: int
Ldloca: int
Ldloca_S: int
Ldnull: int
Ldobj: int
Ldsfld: int
Ldsflda: int
Ldstr: int
Ldtoken: int
Ldvirtftn: int
Leave: int
Leave_S: int
Localloc: int
Mkrefany: int
Mul: int
Mul_Ovf: int
Mul_Ovf_Un: int
Neg: int
Newarr: int
Newobj: int
Nop: int
Not: int
Or: int
Pop: int
Prefix1: int
Prefix2: int
Prefix3: int
Prefix4: int
Prefix5: int
Prefix6: int
Prefix7: int
Prefixref: int
Readonly: int
Refanytype: int
Refanyval: int
Rem: int
Rem_Un: int
Ret: int
Rethrow: int
Shl: int
Shr: int
Shr_Un: int
Sizeof: int
Starg: int
Starg_S: int
Stelem: int
Stelem_I: int
Stelem_I1: int
Stelem_I2: int
Stelem_I4: int
Stelem_I8: int
Stelem_R4: int
Stelem_R8: int
Stelem_Ref: int
Stfld: int
Stind_I: int
Stind_I1: int
Stind_I2: int
Stind_I4: int
Stind_I8: int
Stind_R4: int
Stind_R8: int
Stind_Ref: int
Stloc: int
Stloc_0: int
Stloc_1: int
Stloc_2: int
Stloc_3: int
Stloc_S: int
Stobj: int
Stsfld: int
Sub: int
Sub_Ovf: int
Sub_Ovf_Un: int
Switch: int
Tailcall: int
Throw: int
Unaligned: int
Unbox: int
Unbox_Any: int
Volatile: int
Xor: int
@staticmethod
def TakesSingleByteArgument(inst: OpCode) -> bool:...
class OpCodeType(_n_0_t_2, _n_0_t_3, _n_0_t_4, _n_0_t_5):
Annotation: int
Macro: int
Nternal: int
Objmodel: int
Prefix: int
Primitive: int
value__: int
class OperandType(_n_0_t_2, _n_0_t_3, _n_0_t_4, _n_0_t_5):
InlineBrTarget: int
InlineField: int
InlineI: int
InlineI8: int
InlineMethod: int
InlineNone: int
InlinePhi: int
InlineR: int
InlineSig: int
InlineString: int
InlineSwitch: int
InlineTok: int
InlineType: int
InlineVar: int
ShortInlineBrTarget: int
ShortInlineI: int
ShortInlineR: int
ShortInlineVar: | |
'''
LICENSING
-------------------------------------------------
daemoniker: Cross-platform daemonization tools.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
<NAME>
<EMAIL> | <EMAIL> | <EMAIL>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
# Global dependencies
import logging
import traceback
import os
import sys
import signal
import pickle
import base64
import subprocess
import shlex
import tempfile
import atexit
# Intra-package dependencies
from .utils import platform_specificker
from .utils import default_to
from ._daemonize_common import _redirect_stds
from ._daemonize_common import _write_pid
from ._daemonize_common import _acquire_pidfile
# Windows (and, experimentally, Cygwin) are the only supported targets for
# this module; the Unix-style daemonizer lives elsewhere in the package.
_SUPPORTED_PLATFORM = platform_specificker(
    linux_choice = False,
    win_choice = True,
    # Dunno if this is a good idea but might as well try
    cygwin_choice = True,
    osx_choice = False,
    other_choice = False
)
# ###############################################
# Boilerplate
# ###############################################
logger = logging.getLogger(__name__)
# Control * imports.
__all__ = [
    # 'Inquisitor',
]
# ###############################################
# Library
# ###############################################
class Daemonizer:
    ''' Context manager that emulates Unix daemonization on Windows and
    registers all appropriate cleanup functions.

    Usage:

        with Daemonizer() as (is_setup, daemonize):
            if is_setup:
                setup_code_here()
            else:
                this_will_not_be_run_on_unix()
            *args = daemonize(*daemonizer_args, *args)
    '''

    def __init__(self):
        ''' Start in an undetermined state; __enter__ inspects the
        environment to decide whether we are the parent or the child.
        '''
        self._is_parent = None
        self._daemonize_called = None

    def _daemonize(self, *args, **kwargs):
        ''' Thin dispatcher that does not exit the caller: the parent
        runs the first daemonization half, the child runs the second.
        '''
        self._daemonize_called = True
        if not self._is_parent:
            return _daemonize2(*args, **kwargs)
        return _daemonize1(*args, _exit_caller=False, **kwargs)

    def __enter__(self):
        self._daemonize_called = False
        # The re-invoked child carries __INVOKE_DAEMON__ in its env.
        self._is_parent = '__INVOKE_DAEMON__' not in os.environ
        # In both cases, hand back the parent flag and the daemonizer.
        return self._is_parent, self._daemonize

    def __exit__(self, exc_type, exc_value, exc_tb):
        ''' No real cleanup needed; required for context management and
        to terminate the parent once daemonization has happened.
        '''
        # Guard: __exit__ invoked directly, without a prior __enter__.
        if self._daemonize_called is None:
            self._is_parent = None
            raise RuntimeError('Context manager was inappropriately exited.')

        # Context manager used, but daemonize was never actually called.
        if not self._daemonize_called:
            self._daemonize_called = None
            self._is_parent = None
            logger.warning('Daemonizer exited without calling daemonize.')
            # Any pending exception re-raises when the context departs,
            # so there is nothing to handle or log here.
            return

        # Daemonize was called and we are the child: proceed normally.
        if not self._is_parent:
            return

        # Daemonize was called and we are the parent: report any
        # exception, then summarily self-terminate via os._exit.
        if exc_type is not None:
            report = (
                'Exception in parent:\n' +
                ''.join(traceback.format_tb(exc_tb)) + '\n' +
                repr(exc_value)
            )
            logger.error(report)
            print(report, file=sys.stderr)
            os._exit(2)
        os._exit(0)
def _capability_check(pythonw_path, script_path):
    ''' Verify that the current platform and interpreter layout can
    support Windows daemonization.

    Raises OSError on an unsupported platform, and SystemExit when
    either pythonw.exe or the script to daemonize cannot be found.
    '''
    if not _SUPPORTED_PLATFORM:
        raise OSError(
            'The Windows Daemonizer cannot be used on the current '
            'platform.'
        )

    # Both paths must exist; checked in this order so that the complaint
    # about pythonw.exe always wins.
    required = (
        (pythonw_path,
         'pythonw.exe must be available in the same directory as the '
         'current Python interpreter to support Windows daemonization.'),
        (script_path,
         'Daemonizer cannot locate the script to daemonize (it seems '
         'to have lost itself).'),
    )
    for path, complaint in required:
        if not os.path.exists(path):
            raise SystemExit(complaint)
def _filial_usurpation(chdir):
''' Changes our working directory, helping decouple the child
process from the parent. Not necessary on windows, but could help
standardize stuff for cross-platform apps.
'''
# Well this is certainly a stub.
os.chdir(chdir)
def _clean_file(path):
''' Remove the file at path, if it exists, suppressing any errors.
'''
# Clean up the PID file.
try:
# This will raise if the child process had a chance to register
# and complete its exit handler.
os.remove(path)
# So catch that error if it happens.
except OSError:
pass
class _NamespacePasser:
''' Creates a path in a secure temporary directory, such that the
path can be used to write in a reentrant manner. Upon context exit,
the file will be overwritten with zeros, removed, and then the temp
directory cleaned up.
We can't use the normal tempfile stuff because:
1. it doesn't zero the file
2. it prevents reentrant opening
Using this in a context manager will return the path to the file as
the "as" target, ie, "with _ReentrantSecureTempfile() as path:".
'''
def __init__(self):
''' Store args and kwargs to pass into enter and exit.
'''
seed = os.urandom(16)
self._stem = base64.urlsafe_b64encode(seed).decode()
self._tempdir = None
self.name = None
def __enter__(self):
try:
# Create a resident tempdir
self._tempdir = tempfile.TemporaryDirectory()
# Calculate the final path
self.name = self._tempdir.name + '/' + self._stem
# Ensure the file exists, so future cleaning calls won't error
with open(self.name, 'wb'):
pass
except:
if self._tempdir is not None:
self._tempdir.cleanup()
raise
else:
return self.name
def __exit__(self, exc_type, exc_value, exc_tb):
''' Zeroes the file, removes it, and cleans up the temporary
directory.
'''
try:
# Open the existing file and overwrite it with zeros.
with open(self.name, 'r+b') as f:
to_erase = f.read()
eraser = bytes(len(to_erase))
f.seek(0)
f.write(eraser)
# Remove the file. We just accessed it, so it's guaranteed to exist
os.remove(self.name)
# Warn of any errors in the above, and then re-raise.
except:
logger.error(
'Error while shredding secure temp file.\n' +
''.join(traceback.format_exc())
)
raise
finally:
self._tempdir.cleanup()
def _fork_worker(namespace_path, child_env, pid_file, invocation, chdir,
                 stdin_goto, stdout_goto, stderr_goto, _exit_caller, args):
    ''' Runs the "fork": serializes everything the child needs, spawns
    the daemon process, and busy-waits on it. This shields the parent
    from cancellation via signal sending. Basically, thanks Windows for
    being a dick about signals.
    '''
    # The daughter needs our PID so she can tell us to exit.
    parent_pid = os.getpid()
    # Everything the child will need, prepended to *args.
    payload = (parent_pid, pid_file, chdir, stdin_goto, stdout_goto,
               stderr_goto, _exit_caller) + args
    # We are already shielded from pickling errors, because pickle is
    # needed just to start the worker. Write the payload to the namespace
    # passer using the highest available protocol.
    with open(namespace_path, 'wb') as f:
        pickle.dump(payload, f, protocol=-1)
    # Invoke the invocation!
    daemon = subprocess.Popen(
        invocation,
        # Important: the env is what tells the child to run as a daemon.
        # It must be computed in the root _daemonize1, or the environment
        # would be polluted by the '__CREATE_DAEMON__' key.
        env = child_env,
        # Vital: without a fresh console, our process gets reaped at
        # parent exit.
        creationflags = subprocess.CREATE_NEW_CONSOLE,
    )
    # Busy wait until the daemon exits or it signals us to die.
    daemon.wait()
def _daemonize1(pid_file, *args, chdir=None, stdin_goto=None, stdout_goto=None,
stderr_goto=None, umask=0o027, shielded_fds=None,
fd_fallback_limit=1024, success_timeout=30,
strip_cmd_args=False, explicit_rescript=None,
_exit_caller=True):
''' Create an independent process for invocation, telling it to
store its "pid" in the pid_file (actually, the pid of its signal
listener). Payload is an iterable of variables to pass the invoked
command for returning from _respawnish.
Note that a bare call to this function will result in all code
before the daemonize() call to be run twice.
The daemon's pid will be recorded in pid_file, but creating a
SignalHandler will overwrite it with the signaling subprocess
PID, which will change after every received signal.
*args will be passed to child. Waiting for success signal will
timeout after success_timeout seconds.
strip_cmd_args will ignore all additional command-line args in the
second run.
all other args identical to unix version of daemonize.
| |
key.
# Starts the system settings activity.
SETTINGS = 176
# Key code constant: TV power key.
# On TV remotes, toggles the power on a television screen.
TV_POWER = 177
# Key code constant: TV input key.
# On TV remotes, switches the input on a television screen.
TV_INPUT = 178
# Key code constant: Set-top-box power key.
# On TV remotes, toggles the power on an external Set-top-box.
STB_POWER = 179
# Key code constant: Set-top-box input key.
# On TV remotes, switches the input mode on an external Set-top-box.
STB_INPUT = 180
# Key code constant: A/V Receiver power key.
# On TV remotes, toggles the power on an external A/V Receiver.
AVR_POWER = 181
# Key code constant: A/V Receiver input key.
# On TV remotes, switches the input mode on an external A/V Receiver.
AVR_INPUT = 182
# Key code constant: Red "programmable" key.
# On TV remotes, acts as a contextual/programmable key.
PROG_RED = 183
# Key code constant: Green "programmable" key.
# On TV remotes, acts as a contextual/programmable key.
PROG_GREEN = 184
# Key code constant: Yellow "programmable" key.
# On TV remotes, acts as a contextual/programmable key.
PROG_YELLOW = 185
# Key code constant: Blue "programmable" key.
# On TV remotes, acts as a contextual/programmable key.
PROG_BLUE = 186
# Key code constant: App switch key.
# Should bring up the application switcher dialog.
APP_SWITCH = 187
# Key code constant: Generic Game Pad Button #1.
BUTTON_1 = 188
# Key code constant: Generic Game Pad Button #2.
BUTTON_2 = 189
# Key code constant: Generic Game Pad Button #3.
BUTTON_3 = 190
# Key code constant: Generic Game Pad Button #4.
BUTTON_4 = 191
# Key code constant: Generic Game Pad Button #5.
BUTTON_5 = 192
# Key code constant: Generic Game Pad Button #6.
BUTTON_6 = 193
# Key code constant: Generic Game Pad Button #7.
BUTTON_7 = 194
# Key code constant: Generic Game Pad Button #8.
BUTTON_8 = 195
# Key code constant: Generic Game Pad Button #9.
BUTTON_9 = 196
# Key code constant: Generic Game Pad Button #10.
BUTTON_10 = 197
# Key code constant: Generic Game Pad Button #11.
BUTTON_11 = 198
# Key code constant: Generic Game Pad Button #12.
BUTTON_12 = 199
# Key code constant: Generic Game Pad Button #13.
BUTTON_13 = 200
# Key code constant: Generic Game Pad Button #14.
BUTTON_14 = 201
# Key code constant: Generic Game Pad Button #15.
BUTTON_15 = 202
# Key code constant: Generic Game Pad Button #16.
BUTTON_16 = 203
# Key code constant: Language Switch key.
# Toggles the current input language such as switching between English and Japanese on
# a QWERTY keyboard. On some devices, the same function may be performed by
# pressing Shift+Spacebar.
LANGUAGE_SWITCH = 204
# Key code constant: Manner Mode key.
# Toggles silent or vibrate mode on and off to make the device behave more politely
# in certain settings such as on a crowded train. On some devices, the key may only
# operate when long-pressed.
MANNER_MODE = 205
# Key code constant: 3D Mode key.
# Toggles the display between 2D and 3D mode.
MODE_3D = 206
# Key code constant: Contacts special function key.
# Used to launch an address book application.
CONTACTS = 207
# Key code constant: Calendar special function key.
# Used to launch a calendar application.
CALENDAR = 208
# Key code constant: Music special function key.
# Used to launch a music player application.
MUSIC = 209
# Key code constant: Calculator special function key.
# Used to launch a calculator application.
CALCULATOR = 210
# Key code constant: Japanese full-width / half-width key.
ZENKAKU_HANKAKU = 211
# Key code constant: Japanese alphanumeric key.
EISU = 212
# Key code constant: Japanese non-conversion key.
MUHENKAN = 213
# Key code constant: Japanese conversion key.
HENKAN = 214
# Key code constant: Japanese katakana / hiragana key.
KATAKANA_HIRAGANA = 215
# Key code constant: Japanese Yen key.
YEN = 216
# Key code constant: Japanese Ro key.
RO = 217
# Key code constant: Japanese kana key.
KANA = 218
# Key code constant: Assist key.
# Launches the global assist activity. Not delivered to applications.
ASSIST = 219
# Key code constant: Brightness Down key.
# Adjusts the screen brightness down.
BRIGHTNESS_DOWN = 220
# Key code constant: Brightness Up key.
# Adjusts the screen brightness up.
BRIGHTNESS_UP = 221
# Key code constant: Audio Track key.
# Switches the audio tracks.
MEDIA_AUDIO_TRACK = 222
# Key code constant: Sleep key.
# Puts the device to sleep. Behaves somewhat like {@link #POWER} but it
# has no effect if the device is already asleep.
SLEEP = 223
# Key code constant: Wakeup key.
# Wakes up the device. Behaves somewhat like {@link #POWER} but it
# has no effect if the device is already awake.
WAKEUP = 224
# Key code constant: Pairing key.
# Initiates peripheral pairing mode. Useful for pairing remote control
# devices or game controllers, especially if no other input mode is
# available.
PAIRING = 225
# Key code constant: Media Top Menu key.
# Goes to the top of media menu.
MEDIA_TOP_MENU = 226
# Key code constant: '11' key.
KEY_11 = 227
# Key code constant: '12' key.
KEY_12 = 228
# Key code constant: Last Channel key.
# Goes to the last viewed channel.
LAST_CHANNEL = 229
# Key code constant: TV data service key.
# Displays data services like weather, sports.
TV_DATA_SERVICE = 230
# Key code constant: Voice Assist key.
# Launches the global voice assist activity. Not delivered to applications.
VOICE_ASSIST = 231
# Key code constant: Radio key.
# Toggles TV service / Radio service.
TV_RADIO_SERVICE = 232
# Key code constant: Teletext key.
# Displays Teletext service.
TV_TELETEXT = 233
# Key code constant: Number entry key.
# Initiates to enter multi-digit channel number when each digit key is assigned
# for selecting separate channel. Corresponds to Number Entry Mode (0x1D) of CEC
# User Control Code.
TV_NUMBER_ENTRY = 234
# Key code constant: Analog Terrestrial key.
# Switches to analog terrestrial broadcast service.
TV_TERRESTRIAL_ANALOG = 235
# Key code constant: Digital Terrestrial key.
# Switches to digital terrestrial broadcast service.
TV_TERRESTRIAL_DIGITAL = 236
# Key code constant: Satellite key.
# Switches to digital satellite broadcast service.
TV_SATELLITE = 237
# Key code constant: BS key.
# Switches to BS digital satellite broadcasting service available in Japan.
TV_SATELLITE_BS = 238
# Key code constant: CS key.
# Switches to CS digital satellite broadcasting service available in Japan.
TV_SATELLITE_CS = 239
# Key code constant: BS/CS key.
# Toggles between BS and CS digital satellite services.
TV_SATELLITE_SERVICE = 240
# Key code constant: Toggle Network key.
# Toggles selecting broadcast services.
TV_NETWORK = 241
# Key code constant: Antenna/Cable key.
# Toggles broadcast input source between antenna and cable.
TV_ANTENNA_CABLE = 242
# Key code constant: HDMI #1 key.
# Switches to HDMI input #1.
TV_INPUT_HDMI_1 = 243
# Key code constant: HDMI #2 key.
# Switches to HDMI input #2.
TV_INPUT_HDMI_2 = 244
# Key code constant: HDMI #3 key.
# Switches to HDMI input #3.
TV_INPUT_HDMI_3 = 245
# Key code constant: HDMI #4 key.
# Switches to HDMI input #4.
TV_INPUT_HDMI_4 = 246
# Key code constant: Composite #1 key.
# Switches to composite video input #1.
TV_INPUT_COMPOSITE_1 = 247
# Key code constant: Composite #2 key.
# Switches to composite video input #2.
TV_INPUT_COMPOSITE_2 = 248
# Key code constant: Component #1 key.
# Switches to component video input #1.
TV_INPUT_COMPONENT_1 = 249
# Key code constant: Component #2 key.
# | |
<filename>sureal/subjective_model.py
import copy
from abc import ABCMeta, abstractmethod
import sys
import time
import numpy as np
from scipy import linalg
from scipy import stats
import pandas as pd
from scipy.stats import chi2, norm
from sureal.core.mixin import TypeVersionEnabled
from sureal.tools.misc import import_python_file, indices, weighed_nanmean_2d
from sureal.dataset_reader import RawDatasetReader
from sureal.tools.stats import vectorized_gaussian, vectorized_convolution_of_two_logistics, \
vectorized_convolution_of_two_uniforms
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
__metaclass__ = ABCMeta
class SubjectiveModel(TypeVersionEnabled):
"""
Base class for any model that takes the input of a subjective quality test
experiment dataset with raw scores (dis_video must has key of 'os' (opinion
score)) and output estimated quality for each impaired video (e.g. MOS, DMOS
or more advanced estimate of subjective quality).
A number of common functionalities are included: dscore_mode, zscore_mode,
normalize_final, transform_final, subject_rejection
"""
@classmethod
@abstractmethod
def _run_modeling(cls, dataset_reader, **kwargs):
    ''' Produce the model-result dict for the given dataset reader.

    Abstract: the base class has no modeling logic, so concrete
    subjective models must override this.
    '''
    raise NotImplementedError
def _assert_args(self):
    # Validate constructor input: modeling assumes a RawDatasetReader
    # (or subclass), so that its reader interface is available.
    assert isinstance(self.dataset_reader, RawDatasetReader)
def __init__(self, dataset_reader):
    ''' Build the model around a RawDatasetReader and validate it. '''
    TypeVersionEnabled.__init__(self)
    self.dataset_reader = dataset_reader
    # Reject unsupported reader types early, at construction time.
    self._assert_args()
@classmethod
def _import_dataset_and_filter(cls, dataset_filepath, content_ids, asset_ids):
    ''' Load a dataset module from file, optionally keeping only the
    dis_videos whose content_id / asset_id appear in the given
    collections. A None filter means "keep everything" for that field.
    '''
    dataset = import_python_file(dataset_filepath)
    for field, allowed in (('content_id', content_ids),
                           ('asset_id', asset_ids)):
        if allowed is not None:
            dataset.dis_videos = [dv for dv in dataset.dis_videos
                                  if dv[field] in allowed]
    return dataset
@classmethod
def from_dataset_file(cls, dataset_filepath, content_ids=None, asset_ids=None):
    ''' Alternate constructor: build the model directly from a dataset
    file path, with optional content/asset filtering.
    '''
    dataset = cls._import_dataset_and_filter(
        dataset_filepath, content_ids, asset_ids)
    return cls(RawDatasetReader(dataset))
def run_modeling(self, **kwargs):
    ''' Execute the model, post-process, then cache and return the
    result dict (cached for the to_aggregated_dataset* methods).
    '''
    result = self._run_modeling(self.dataset_reader, **kwargs)
    self._postprocess_model_result(result, **kwargs)
    self.model_result = result
    return result
def to_aggregated_dataset(self, **kwargs):
    ''' Return an aggregated dataset built from the modeled quality
    scores (and their std, when the model provides one).
    '''
    self._assert_modeled()
    # quality_scores_std is optional; absent means None.
    return self.dataset_reader.to_aggregated_dataset(
        self.model_result['quality_scores'],
        scores_std=self.model_result.get('quality_scores_std'),
        **kwargs)
def to_aggregated_dataset_file(self, dataset_filepath, **kwargs):
    ''' Write an aggregated dataset file built from the modeled quality
    scores (and their std, when the model provides one).
    '''
    self._assert_modeled()
    # quality_scores_std is optional; absent means None.
    self.dataset_reader.to_aggregated_dataset_file(
        dataset_filepath,
        self.model_result['quality_scores'],
        scores_std=self.model_result.get('quality_scores_std'),
        **kwargs)
def _assert_modeled(self):
    ''' Guard: ensure run_modeling() has produced quality_scores. '''
    assert hasattr(self, 'model_result'), \
        "self.model_result doesn't exist. Run run_modeling() first."
    assert 'quality_scores' in self.model_result, \
        "self.model_result must have quality_scores."
@staticmethod
def _get_ref_mos(dataset_reader, mos):
    ''' For each dis video, look up the MOS of its reference video.

    Returns a numpy array aligned with the reader's dis videos.
    Asserts that exactly one reference video exists per content id.
    '''
    ref_mos = []
    for dis_video in dataset_reader.dataset.dis_videos:
        # get the dis video's ref video's mos
        curr_content_id = dis_video['content_id']
        # Positions where the video is a reference AND shares this
        # content id (indices() is the project's predicate-filter helper).
        ref_indices = indices(
            list(zip(dataset_reader.content_id_of_dis_videos,
                     dataset_reader.disvideo_is_refvideo)),
            lambda content_id_is_refvideo:
            content_id_is_refvideo[1] and content_id_is_refvideo[0] == curr_content_id
        )
        assert len(ref_indices) == 1, \
            'Should have only and one ref video for a dis video, ' \
            'but got {}'.format(len(ref_indices))
        ref_idx = ref_indices[0]
        ref_mos.append(mos[ref_idx])
    return np.array(ref_mos)
@staticmethod
def _get_opinion_score_2darray_with_preprocessing(dataset_reader, **kwargs):
    """Return the opinion-score matrix after optional preprocessing steps.

    The matrix is E (dis videos) x S (subjects). Each step is gated by a
    kwarg flag: differential scoring, z-scoring, subject bias offset
    (ITU-T P.913), subject rejection. Returns a dict with
    'opinion_score_2darray' (possibly with subject columns removed),
    'original_opinion_score_2darray', and optional diagnostics
    ('bias_offset_estimate', 'observer_rejected',
    'observer_rejected_1st_stats', 'observer_rejected_2nd_stats').
    """
    s_es = dataset_reader.opinion_score_2darray
    # keep an untouched copy: downstream model-selection stats (dof, bic)
    # are computed on the un-preprocessed data
    original_opinion_score_2darray = copy.deepcopy(s_es)
    ret = dict()
    # dscore_mode: True - do differential-scoring
    #              False - don't do differential-scoring
    dscore_mode = kwargs['dscore_mode'] if 'dscore_mode' in kwargs else False
    # zscore_mode: True - do z-scoring (normalizing to 0-mean 1-std)
    #              False - don't do z-scoring
    zscore_mode = kwargs['zscore_mode'] if 'zscore_mode' in kwargs else False
    # bias_offset: True - do bias offset according to ITU-T P.913
    #              False - don't do bias offset
    bias_offset = kwargs['bias_offset'] if 'bias_offset' in kwargs else False
    # subject_rejection: True - do subject rejection
    #                    False - don't do subject rejection
    subject_rejection = kwargs['subject_rejection'] if 'subject_rejection' in kwargs else False
    # z-scoring and bias offset are mutually exclusive normalizations
    assert not (zscore_mode is True and bias_offset is True)
    if dscore_mode is True:
        # make sure dataset has ref_score
        assert dataset_reader.dataset.ref_score is not None, \
            "For differential score, dataset must have attribute ref_score."
        E, S = s_es.shape
        s_e = pd.DataFrame(s_es).mean(axis=1)  # mean along s
        s_e_ref = DmosModel._get_ref_mos(dataset_reader, s_e)
        # DMOS-style shift: score + ref_score - MOS of the ref video
        s_es = s_es + dataset_reader.ref_score - np.tile(s_e_ref, (S, 1)).T
    if zscore_mode is True:
        E, S = s_es.shape
        mu_s = pd.DataFrame(s_es).mean(axis=0)  # mean along e
        simga_s = pd.DataFrame(s_es).std(ddof=1, axis=0)  # std along e (sic: "simga" = sigma)
        s_es = (s_es - np.tile(mu_s, (E, 1))) / np.tile(simga_s, (E, 1))
    if bias_offset is True:
        E, S = s_es.shape
        # video-by-video, estimate MOS by averageing over subjects
        s_e = pd.DataFrame(s_es).mean(axis=1)  # mean along s
        # subject by subject, estimate subject bias by comparing
        # against MOS
        delta_es = s_es - np.tile(s_e, (S, 1)).T
        delta_s = pd.DataFrame(delta_es).mean(axis=0)  # mean along e
        # remove bias from opinion scores
        s_es = s_es - np.tile(delta_s, (E, 1))
        ret['bias_offset_estimate'] = delta_s
    if subject_rejection is True:
        E, S = s_es.shape
        # ps / qs count, per subject, how often a score lies above / below
        # the per-video outlier threshold
        ps = np.zeros(S)
        qs = np.zeros(S)
        for s_e in s_es:
            s_e_notnan = s_e[~np.isnan(s_e)]
            mu = np.mean(s_e_notnan)
            sigma = np.std(s_e_notnan)
            kurt = stats.kurtosis(s_e_notnan, fisher=False)
            # per-video kurtosis selects the outlier threshold:
            # 2*sigma when kurtosis is in [2, 4], sqrt(20)*sigma otherwise
            if 2 <= kurt and kurt <= 4:
                for idx_s, s in enumerate(s_e):
                    if not np.isnan(s):
                        if s >= mu + 2 * sigma:
                            ps[idx_s] += 1
                        if s <= mu - 2 * sigma:
                            qs[idx_s] += 1
            else:
                for idx_s, s in enumerate(s_e):
                    if not np.isnan(s):
                        if s >= mu + np.sqrt(20) * sigma:
                            ps[idx_s] += 1
                        if s <= mu - np.sqrt(20) * sigma:
                            qs[idx_s] += 1
        rejections = []
        acceptions = []
        reject_1st_stats = []
        reject_2nd_stats = []
        # NOTE(review): idx_s and subject are always equal here — this is a
        # zip of two identical ranges
        for idx_s, subject in zip(list(range(S)), list(range(S))):
            reject_1st_stat = (ps[idx_s] + qs[idx_s]) / E
            # NOTE(review): ps + qs can be 0 here, making this nan (the
            # subject is then accepted since nan < 0.3 is False) — confirm
            # this is the intended behavior
            reject_2nd_stat = np.abs((ps[idx_s] - qs[idx_s]) / (ps[idx_s] + qs[idx_s]))
            reject_1st_stats.append(reject_1st_stat)
            reject_2nd_stats.append(reject_2nd_stat)
            # reject subjects with many, mostly one-sided, outliers
            if reject_1st_stat > 0.05 and reject_2nd_stat < 0.3:
                rejections.append(subject)
            else:
                acceptions.append(subject)
        # drop the rejected subjects' columns
        s_es = s_es[:, acceptions]
        observer_rejected = np.array([False for _ in range(S)])
        observer_rejected[rejections] = True
        ret['observer_rejected'] = observer_rejected
        ret['observer_rejected_1st_stats'] = reject_1st_stats
        ret['observer_rejected_2nd_stats'] = reject_2nd_stats
    ret['opinion_score_2darray'] = s_es
    ret['original_opinion_score_2darray'] = original_opinion_score_2darray
    return ret
@staticmethod
def _postprocess_model_result(result, **kwargs):
# normalize_final: True - do normalization on final quality score
# False - don't do
normalize_final = kwargs['normalize_final'] if 'normalize_final' in kwargs else False
# transform_final: True - do (linear or other) transform on final quality score
# False - don't do
transform_final = kwargs['transform_final'] if 'transform_final' in kwargs else None
assert 'quality_scores' in result
if normalize_final is False:
pass
else:
quality_scores = np.array(result['quality_scores'])
quality_scores = (quality_scores - np.mean(quality_scores)) / \
np.std(quality_scores)
result['quality_scores'] = list(quality_scores)
if transform_final is None:
pass
else:
quality_scores = np.array(result['quality_scores'])
output_scores = np.zeros(quality_scores.shape)
if 'p2' in transform_final:
# polynomial coef of order 2
output_scores += transform_final['p2'] * quality_scores * quality_scores
if 'p1' in transform_final:
# polynomial coef of order 1
output_scores += transform_final['p1'] * quality_scores
if 'p0' in transform_final:
# polynomial coef of order 0
output_scores += transform_final['p0']
result['quality_scores'] = list(output_scores)
class MosModel(SubjectiveModel):
    """
    Mean Opinion Score (MOS) subjective model: per-video average of the
    (optionally preprocessed) opinion scores.
    """

    TYPE = 'MOS'
    VERSION = '1.0'

    @classmethod
    def _run_modeling(cls, dataset_reader, **kwargs):
        # Preprocess raw scores (d-scoring, z-scoring, bias offset, subject
        # rejection), then compute MOS and its statistics.
        pre = cls._get_opinion_score_2darray_with_preprocessing(dataset_reader, **kwargs)
        result = cls._get_mos_and_stats(pre['opinion_score_2darray'],
                                        pre['original_opinion_score_2darray'])
        if 'observer_rejected' in pre:
            # propagate subject-rejection diagnostics into the result
            assert 'observer_rejected_1st_stats' in pre
            assert 'observer_rejected_2nd_stats' in pre
            result['observer_rejected'] = pre['observer_rejected']
            result['observer_rejected_1st_stats'] = pre['observer_rejected_1st_stats']
            result['observer_rejected_2nd_stats'] = pre['observer_rejected_2nd_stats']
        return result

    @classmethod
    def _get_mos_and_stats(cls, os_2darray, original_os_2darray):
        """Compute MOS plus std / CI95 / ambiguity and model-selection stats."""
        mos = np.nanmean(os_2darray, axis=1)  # NaN-ignoring mean over subjects
        std = np.nanstd(os_2darray, axis=1, ddof=1)  # sample std (ddof=1)
        # standard error of the mean: std / sqrt(N), N ignoring NaN
        mos_std = std / np.sqrt(np.nansum(~np.isnan(os_2darray), axis=1))
        result = {
            'quality_scores': mos,
            'quality_scores_std': mos_std,
            'quality_scores_ci95': [list(1.95996 * mos_std), list(1.95996 * mos_std)],
            'quality_ambiguity': std,
            'raw_scores': os_2darray,
        }
        num_pvs, num_obs = os_2darray.shape
        num_os = np.sum(~np.isnan(os_2darray))
        result['reconstructions'] = cls._get_reconstructions(mos, num_obs)
        original_num_pvs, original_num_obs = original_os_2darray.shape
        original_num_os = np.sum(~np.isnan(original_os_2darray))
        # degrees of freedom per observation, based on the un-preprocessed data
        dof = cls._get_dof(original_num_pvs, original_num_obs) / original_num_os
        result['dof'] = dof
        # per-observation log-likelihood under a per-video Gaussian model
        loglikelihood = np.nansum(np.log(vectorized_gaussian(
            os_2darray,
            np.tile(mos, (num_obs, 1)).T,
            np.tile(std, (num_obs, 1)).T,
        ))) / num_os
        result['loglikelihood'] = loglikelihood
        result['aic'] = 2 * dof - 2 * loglikelihood  # AIC per observation
        result['bic'] = np.log(original_num_os) * dof - 2 * loglikelihood  # BIC per observation
        return result

    @classmethod
    def _get_reconstructions(cls, x_e, S):
        # broadcast per-video scores back to an E x S matrix
        return np.tile(x_e, (S, 1)).T

    @classmethod
    def _get_dof(cls, E, S):
        # two parameters (mean and std) per video
        return E * 2
class DmosModel(MosModel):
    """
    Differential Mean Opinion Score (DMOS) subjective model.

    Identical to MOS but always computed on differential scores:
    DMOS = MOS + ref_score (e.g. 5.0) - MOS_of_ref_video
    """

    TYPE = 'DMOS'
    VERSION = '1.0'

    def run_modeling(self, **kwargs):
        # Overrides SubjectiveModel.run_modeling to force dscore_mode on;
        # passing it explicitly is redundant and rejected.
        if kwargs.get('dscore_mode') is True:
            assert False, '{} is already doing dscoring, no need to repeat.'.format(self.__class__.__name__)
        forwarded = dict(kwargs, dscore_mode=True)
        return super(DmosModel, self).run_modeling(**forwarded)
class LiveDmosModel(SubjectiveModel):
"""
Differential Mean Opinion Score (DMOS) subjective model based on:
Study of Subjective and Objective Quality Assessment of Video,
<NAME>, <NAME>, <NAME> and <NAME>,
IEEE Trans. Image Processing, Vol. 19, No. 6, June 2010.
Difference is:
DMOS = MOS + ref_score (e.g. 5.0) - MOS_of_ref_video
instead of
DMOS = MOS_of_ref_video - MOS
"""
TYPE = 'LIVE_DMOS'
VERSION = '1.0'
@classmethod
def _run_modeling(cls, dataset_reader, **kwargs):
if 'dscore_mode' in kwargs and kwargs['dscore_mode'] is True:
assert False, '{} is already doing dscoring, no need to repeat.'.format(cls.__class__.__name__)
if 'zscore_mode' in kwargs and kwargs['zscore_mode'] | |
# mmderain/datasets/pipelines/augmentation.py
# This code is taken from https://github.com/open-mmlab/mmediting
# Modified by <NAME>
import copy
import math
import numbers
import cv2
import mmcv
import numpy as np
from ..registry import PIPELINES
@PIPELINES.register_module()
class Resize:
    """Resize data to a specific size for training or resize the images to fit
    the network input regulation for testing.

    When used for resizing images to fit network input regulation, the case is
    that a network may have several downsample and then upsample operation,
    then the input height and width should be divisible by the downsample
    factor of the network.
    For example, the network would downsample the input for 5 times with
    stride 2, then the downsample factor is 2^5 = 32 and the height
    and width should be divisible by 32.

    Required keys are the keys in attribute "keys", added or modified keys are
    "keep_ratio", "scale_factor", "interpolation" and the
    keys in attribute "keys".

    All keys in "keys" should have the same shape. "test_trans" is used to
    record the test transformation to align the input's shape.

    Args:
        keys (list[str]): The images to be resized.
        scale (float | Tuple[int]): If scale is Tuple(int), target spatial
            size (h, w). Otherwise, target spatial size is scaled by input
            size.
            Note that when it is used, `size_factor` and `max_size` are
            useless. Default: None
        keep_ratio (bool): If set to True, images will be resized without
            changing the aspect ratio. Otherwise, it will resize images to a
            given size. Default: False.
            Note that it is used together with `scale`.
        size_factor (int): Let the output shape be a multiple of size_factor.
            Default: None.
            Note that when it is used, `scale` should be set to None and
            `keep_ratio` should be set to False.
        max_size (int): The maximum size of the longest side of the output.
            Default: None.
            Note that it is used together with `size_factor`.
        interpolation (str): Algorithm used for interpolation:
            "nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
            Default: "bilinear".
        backend (str | None): The image resize backend type. Options are `cv2`,
            `pillow`, `None`. If backend is None, the global imread_backend
            specified by ``mmcv.use_backend()`` will be used.
            Default: None.
        output_keys (list[str] | None): The resized images. Default: None
            Note that if it is not `None`, its length should be equal to keys.
    """

    def __init__(self,
                 keys,
                 scale=None,
                 keep_ratio=False,
                 size_factor=None,
                 max_size=None,
                 interpolation='bilinear',
                 backend=None,
                 output_keys=None):
        assert keys, 'Keys should not be empty.'
        if output_keys:
            assert len(output_keys) == len(keys)
        else:
            output_keys = keys
        if size_factor:
            # fixed: this message was accidentally a (str, str) tuple due to a
            # stray comma, so assertion failures printed a tuple
            assert scale is None, ('When size_factor is used, scale should '
                                   f'be None. But received {scale}.')
            assert keep_ratio is False, ('When size_factor is used, '
                                         'keep_ratio should be False.')
        if max_size:
            assert size_factor is not None, (
                'When max_size is used, '
                f'size_factor should also be set. But received {size_factor}.')
        if isinstance(scale, float):
            if scale <= 0:
                raise ValueError(f'Invalid scale {scale}, must be positive.')
        elif mmcv.is_tuple_of(scale, int):
            max_long_edge = max(scale)
            max_short_edge = min(scale)
            if max_short_edge == -1:
                # assign np.inf to long edge for rescaling short edge later.
                scale = (np.inf, max_long_edge)
        elif scale is not None:
            raise TypeError(
                f'Scale must be None, float or tuple of int, but got '
                f'{type(scale)}.')

        self.keys = keys
        self.output_keys = output_keys
        self.scale = scale
        self.size_factor = size_factor
        self.max_size = max_size
        self.keep_ratio = keep_ratio
        self.interpolation = interpolation
        self.backend = backend

    def _resize(self, img):
        # Resize one image, recording the scale factor on self so __call__
        # can store it in the results dict.
        if self.keep_ratio:
            img, self.scale_factor = mmcv.imrescale(
                img,
                self.scale,
                return_scale=True,
                interpolation=self.interpolation,
                backend=self.backend)
        else:
            img, w_scale, h_scale = mmcv.imresize(
                img,
                self.scale,
                return_scale=True,
                interpolation=self.interpolation,
                backend=self.backend)
            self.scale_factor = np.array((w_scale, h_scale), dtype=np.float32)
        return img

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.size_factor:
            # derive the target size from the first key's shape, rounded down
            # to a multiple of size_factor (and capped by max_size)
            h, w = results[self.keys[0]].shape[:2]
            new_h = h - (h % self.size_factor)
            new_w = w - (w % self.size_factor)
            if self.max_size:
                new_h = min(self.max_size - (self.max_size % self.size_factor),
                            new_h)
                new_w = min(self.max_size - (self.max_size % self.size_factor),
                            new_w)
            self.scale = (new_w, new_h)
        for key, out_key in zip(self.keys, self.output_keys):
            results[out_key] = self._resize(results[key])
            if len(results[out_key].shape) == 2:
                # keep an explicit trailing channel dim for grayscale images
                results[out_key] = np.expand_dims(results[out_key], axis=2)

        results['scale_factor'] = self.scale_factor
        results['keep_ratio'] = self.keep_ratio
        results['interpolation'] = self.interpolation
        results['backend'] = self.backend

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        # fixed: added the missing space between "max_size=...," and
        # "interpolation=..."
        repr_str += (
            f'(keys={self.keys}, output_keys={self.output_keys}, '
            f'scale={self.scale}, '
            f'keep_ratio={self.keep_ratio}, size_factor={self.size_factor}, '
            f'max_size={self.max_size}, interpolation={self.interpolation})')
        return repr_str
@PIPELINES.register_module()
class Flip:
    """Flip the input data with a probability.

    Reverses the order of elements along the chosen direction; the shape of
    the data is preserved. Required keys are the keys in attribute "keys";
    added or modified keys are "flip", "flip_direction" and the keys in
    attribute "keys". A list of images stored under one key is flipped with
    the same decision.

    Args:
        keys (list[str]): The images to be flipped.
        flip_ratio (float): The probability to flip the images.
        direction (str): Flip images horizontally or vertically. Options are
            "horizontal" | "vertical". Default: "horizontal".
    """

    _directions = ['horizontal', 'vertical']

    def __init__(self, keys, flip_ratio=0.5, direction='horizontal'):
        if direction not in self._directions:
            raise ValueError(f'Direction {direction} is not supported.'
                             f'Currently support ones are {self._directions}')
        self.keys = keys
        self.flip_ratio = flip_ratio
        self.direction = direction

    def __call__(self, results):
        """Flip the configured keys in place with probability ``flip_ratio``.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        do_flip = np.random.random() < self.flip_ratio

        if do_flip:
            for key in self.keys:
                target = results[key]
                if isinstance(target, list):
                    # apply the same flip to every image in the list, in place
                    for item in target:
                        mmcv.imflip_(item, self.direction)
                else:
                    mmcv.imflip_(target, self.direction)

        results['flip'] = do_flip
        results['flip_direction'] = self.direction

        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, flip_ratio={self.flip_ratio}, '
                f'direction={self.direction})')
@PIPELINES.register_module()
class Rotate:
"""Apply Rotate Transformation to image.
Args:
keys (list[str]): The images to be rotated.
angle (int | float): Rotation angle in degrees, positive values mean
clockwise rotation.
rotate_ratio (float): The propability to rotate the images.
scale (int | float): Isotropic scale factor. Same in
``mmcv.imrotate``.
center (int | float | tuple[float]): Center point (w, h) of the
rotation in the source image. If None, the center of the
image will be used. Same in ``mmcv.imrotate``.
img_fill_val (int | float | tuple): The fill value for image border.
If float, the same value will be used for all the three
channels of image. If tuple, the should be 3 elements (e.g.
equals the number of channels for image).
"""
def __init__(self,
             keys,
             angle,
             rotate_ratio,
             scale=1,
             center=None,
             img_fill_val=128):
    # Validate angle / scale types up front so configuration errors surface
    # at pipeline-construction time rather than mid-training.
    assert isinstance(angle, (int, float)), \
        f'The angle must be type int or float. got {type(angle)}.'
    assert isinstance(scale, (int, float)), \
        f'The scale must be type int or float. got type {type(scale)}.'
    # Normalize center: a scalar becomes (c, c); a tuple must be (w, h);
    # anything else must be None (use the image center).
    if isinstance(center, (int, float)):
        center = (center, center)
    elif isinstance(center, tuple):
        assert len(center) == 2, 'center with type tuple must have '\
            f'2 elements. got {len(center)} elements.'
    else:
        assert center is None, 'center must be None or type int, '\
            f'float or tuple, got type {type(center)}.'
    # Normalize img_fill_val to a 3-tuple of floats in [0, 255].
    if isinstance(img_fill_val, (float, int)):
        img_fill_val = tuple([float(img_fill_val)] * 3)
    elif isinstance(img_fill_val, tuple):
        assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
            f'have 3 elements. got {len(img_fill_val)}.'
        img_fill_val = tuple([float(val) for val in img_fill_val])
    else:
        raise ValueError(
            'img_fill_val must be float or tuple with 3 elements.')
    assert np.all([0 <= val <= 255 for val in img_fill_val]), \
        'all elements of img_fill_val should between range [0,255]. '\
        f'got {img_fill_val}.'
    self.keys = keys
    self.angle = angle
    self.rotate_ratio = rotate_ratio
    self.scale = scale
    self.center = center
    self.img_fill_val = img_fill_val
def _rotate_img(self, img, angle):
    """Rotate a single image around ``self.center`` by ``angle`` degrees.

    (Docstring corrected: the previous version documented a ``results``
    dict and center/scale parameters this method does not take.)

    Args:
        img (np.ndarray): The image to rotate.
        angle (float): Rotation angle in degrees, positive values
            mean clockwise rotation. Same in ``mmcv.imrotate``.

    Returns:
        np.ndarray: The rotated image; border filled with
            ``self.img_fill_val``.
    """
    h, w = img.shape[:2]
    center = self.center
    if center is None:
        # default to the geometric center of the image
        center = ((w - 1) * 0.5, (h - 1) * 0.5)
    img_rotated = mmcv.imrotate(
        img, angle, center, self.scale, border_value=self.img_fill_val)
    return img_rotated
def __call__(self, results):
"""Call function to rotate images, bounding boxes, masks and semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Rotated results.
| |
# Based on: ProteinsWebTeam/project-skeleton
from rest_framework import status
from webfront.models import Taxonomy
from webfront.tests.InterproRESTTestCase import InterproRESTTestCase
class TaxonomyFixturesTest(InterproRESTTestCase):
    def test_the_fixtures_are_loaded(self):
        """The taxonomy fixtures contain exactly the 6 expected taxa."""
        all_taxa = Taxonomy.objects.all()
        self.assertEqual(all_taxa.count(), 6)
        scientific_names = [taxon.scientific_name for taxon in all_taxa]
        self.assertIn("ROOT", scientific_names)
        self.assertNotIn("unicorn", scientific_names)

    def test_can_get_the_taxonomy_count(self):
        """GET /api/taxonomy returns the per-source taxa counters."""
        response = self.client.get("/api/taxonomy")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("taxa", response.data)
        self.assertIn("uniprot", response.data["taxa"])
        # self.assertIn("proteome", response.data["taxa"])

    def test_can_read_taxonomy_list(self):
        """The uniprot taxonomy list has the expected shape and size."""
        response = self.client.get("/api/taxonomy/uniprot")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._check_is_list_of_objects_with_key(response.data["results"], "metadata")
        self.assertEqual(len(response.data["results"]), 6)

    def test_can_read_taxonomy_id(self):
        """A single taxon can be retrieved by its tax id."""
        response = self.client.get("/api/taxonomy/uniprot/2")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._check_taxonomy_details(response.data["metadata"])
class TaxonomyProteomeFixturesTest(InterproRESTTestCase):
    def test_can_read_taxonomy_with_proteome_list(self):
        """The taxonomy/proteome listing pairs each taxon with its proteomes."""
        response = self.client.get("/api/taxonomy/uniprot/proteome")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        results = response.data["results"]
        self._check_is_list_of_objects_with_key(results, "metadata")
        self._check_is_list_of_objects_with_key(results, "proteomes")
        self.assertEqual(len(results), 3)

    def test_can_read_taxonomy_leaf_id_with_proteome_count(self):
        """A leaf taxon reports how many uniprot proteomes it has."""
        response = self.client.get("/api/taxonomy/uniprot/40296/proteome")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("metadata", response.data)
        self.assertIn("proteomes", response.data)
        self.assertIn("uniprot", response.data["proteomes"])
        self.assertEqual(response.data["proteomes"]["uniprot"], 1)

    def test_can_read_taxonomy_leaf_id_with_proteomes(self):
        """A leaf taxon lists its single matching proteome."""
        response = self.client.get("/api/taxonomy/uniprot/40296/proteome/uniprot")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("metadata", response.data)
        self.assertIn("proteome_subset", response.data)
        self.assertEqual(len(response.data["proteome_subset"]), 1)

    def test_can_read_taxonomy_node_id_with_proteomes(self):
        """An internal taxonomy node aggregates the proteomes of its subtree."""
        response = self.client.get("/api/taxonomy/uniprot/2579/proteome/uniprot")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("metadata", response.data)
        self.assertIn("proteome_subset", response.data)
        self.assertEqual(len(response.data["proteome_subset"]), 2)

    def test_can_read_proteome_id_including_tax_id(self):
        """Proteome UP000030104 is reachable under every taxon of its lineage."""
        for taxon in (1, 2, 40296):
            response = self.client.get(
                "/api/taxonomy/uniprot/{}/proteome/uniprot/UP000030104".format(taxon)
            )
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "failed at " + str(taxon)
            )
            self.assertIn("proteomes", response.data)
            self.assertEqual(len(response.data["proteomes"]), 1)
            proteome = response.data["proteomes"][0]
            self.assertIn("accession", proteome)
            self.assertIn("taxonomy", proteome)
class EntryTaxonomyTest(InterproRESTTestCase):
    def test_can_get_the_taxonomy_count(self):
        """/api/entry/taxonomy returns entry and taxonomy counter overviews."""
        response = self.client.get("/api/entry/taxonomy")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._check_entry_count_overview(response.data)
        self._check_taxonomy_count_overview(response.data)

    def test_can_get_the_taxonomy_count_on_a_list(self):
        """Entry list endpoints attach a taxa counter to every result."""
        acc = "IPR003165"
        urls = [
            "/api/entry/interpro/taxonomy/",
            "/api/entry/pfam/taxonomy/",
            "/api/entry/unintegrated/taxonomy/",
            "/api/entry/interpro/pfam/taxonomy/",
            "/api/entry/unintegrated/pfam/taxonomy/",
            "/api/entry/interpro/" + acc + "/pfam/taxonomy",
        ]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_is_list_of_objects_with_key(
                response.data["results"], "metadata"
            )
            self._check_is_list_of_objects_with_key(response.data["results"], "taxa")
            for result in response.data["results"]:
                self._check_taxonomy_count_overview(result)

    def test_urls_that_return_entry_with_taxonomy_count(self):
        """Single-entry endpoints include a taxonomy count overview."""
        acc = "IPR003165"
        pfam = "PF02171"
        pfam_un = "PF17176"
        urls = [
            "/api/entry/interpro/" + acc + "/taxonomy",
            "/api/entry/pfam/" + pfam + "/taxonomy",
            "/api/entry/pfam/" + pfam_un + "/taxonomy",
            "/api/entry/interpro/" + acc + "/pfam/" + pfam + "/taxonomy",
            "/api/entry/interpro/pfam/" + pfam + "/taxonomy",
            "/api/entry/unintegrated/pfam/" + pfam_un + "/taxonomy",
        ]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_entry_details(response.data["metadata"])
            self.assertIn(
                "taxa",
                response.data,
                "'taxa' should be one of the keys in the response",
            )
            self._check_taxonomy_count_overview(response.data)

    def test_can_filter_entry_counter_with_taxonomy_db(self):
        """Filtering by taxonomy db augments the entry counters with taxa."""
        url = "/api/entry/taxonomy/uniprot"
        response = self.client.get(url)
        self.assertEqual(
            response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
        )
        self.assertIn(
            "taxa",
            response.data["entries"]["integrated"],
            "'taxa' should be one of the keys in the response",
        )
        # the unintegrated counter may legitimately be the integer 0
        if response.data["entries"]["unintegrated"] != 0:
            self.assertIn(
                "taxa",
                response.data["entries"]["unintegrated"],
                "'taxa' should be one of the keys in the response",
            )

    def test_can_get_the_taxonomy_list_on_a_list(self):
        """Entry list results each expose a taxonomy subset."""
        acc = "IPR003165"
        urls = [
            "/api/entry/interpro/taxonomy/uniprot",
            "/api/entry/unintegrated/taxonomy/uniprot",
            "/api/entry/interpro/" + acc + "/pfam/taxonomy/uniprot",
        ]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_is_list_of_objects_with_key(
                response.data["results"], "metadata"
            )
            self._check_is_list_of_objects_with_key(
                response.data["results"], "taxonomy_subset"
            )
            for result in response.data["results"]:
                for taxon in result["taxonomy_subset"]:
                    self._check_taxonomy_from_searcher(taxon)

    def test_can_get_the_taxonomy_list_on_an_object(self):
        """A single entry's taxonomy subset can be listed."""
        urls = [
            "/api/entry/interpro/IPR003165/taxonomy/uniprot",
            "/api/entry/pfam/PF02171/taxonomy/uniprot",
            "/api/entry/unintegrated/pfam/PF17176/taxonomy/uniprot",
            "/api/entry/interpro/IPR003165/pfam/PF02171/taxonomy/uniprot",
        ]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_entry_details(response.data["metadata"])
            self.assertIn("taxonomy_subset", response.data)
            for org in response.data["taxonomy_subset"]:
                self._check_taxonomy_from_searcher(org)

    def test_can_filter_entry_counter_with_taxonomy_acc(self):
        """Entry counters can be filtered by a specific taxon accession."""
        urls = ["/api/entry/taxonomy/uniprot/2579", "/api/entry/taxonomy/uniprot/40296"]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_entry_count_overview(response.data)

    def test_can_get_the_taxonomy_object_on_a_list(self):
        """Entry lists can be narrowed to a single taxon."""
        acc = "IPR003165"
        urls = [
            "/api/entry/interpro/taxonomy/uniprot/2579",
            "/api/entry/unintegrated/taxonomy/uniprot/2579",
            "/api/entry/unintegrated/taxonomy/uniprot/344612",
            "/api/entry/interpro/" + acc + "/pfam/taxonomy/uniprot/344612",
        ]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_is_list_of_objects_with_key(
                response.data["results"], "metadata"
            )
            self._check_is_list_of_objects_with_key(response.data["results"], "taxa")
            for result in response.data["results"]:
                for org in result["taxa"]:
                    self._check_taxonomy_from_searcher(org)

    def test_can_get_the_taxonomy_object_on_an_object(self):
        """A single entry can be intersected with a single taxon.

        Fixed method name typo ('thetaxonomy' -> 'the_taxonomy') for
        consistency with the sibling tests; test discovery is unaffected.
        """
        urls = [
            "/api/entry/interpro/IPR003165/taxonomy/uniprot/40296",
            "/api/entry/unintegrated/pfam/PF17176/taxonomy/uniprot/344612",
            "/api/entry/unintegrated/pfam/PF17176/taxonomy/uniprot/1",
        ]
        for url in urls:
            response = self.client.get(url)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
            )
            self._check_entry_details(response.data["metadata"])
            self.assertIn("taxa", response.data)
            for org in response.data["taxa"]:
                self._check_taxonomy_from_searcher(org)
class ProteinTaxonomyTest(InterproRESTTestCase):
    def test_can_get_the_taxonomy_count(self):
        """/api/protein/taxonomy exposes taxonomy and protein counters."""
        response = self.client.get("/api/protein/taxonomy")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._check_taxonomy_count_overview(response.data)
        self._check_protein_count_overview(response.data)

    def test_can_get_the_taxonomy_count_on_a_list(self):
        """Every protein list result carries a taxa counter."""
        for endpoint in (
            "/api/protein/reviewed/taxonomy/",
            "/api/protein/unreviewed/taxonomy/",
            "/api/protein/uniprot/taxonomy/",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            items = response.data["results"]
            self._check_is_list_of_objects_with_key(items, "metadata")
            self._check_is_list_of_objects_with_key(items, "taxa")
            for item in items:
                self._check_taxonomy_count_overview(item)

    def test_urls_that_return_protein_with_taxonomy_count(self):
        """Single-protein endpoints include a taxonomy count overview."""
        reviewed = "A1CUJ5"
        unreviewed = "P16582"
        for endpoint in (
            f"/api/protein/uniprot/{reviewed}/taxonomy/",
            f"/api/protein/uniprot/{unreviewed}/taxonomy/",
            f"/api/protein/reviewed/{reviewed}/taxonomy/",
            f"/api/protein/unreviewed/{unreviewed}/taxonomy/",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            self._check_protein_details(response.data["metadata"])
            self.assertIn(
                "taxa",
                response.data,
                "'taxa' should be one of the keys in the response",
            )
            self._check_taxonomy_count_overview(response.data)

    def test_can_filter_protein_counter_with_taxonomy_db(self):
        """Filtering by taxonomy db augments the protein counters with taxa."""
        url = "/api/protein/taxonomy/uniprot"
        response = self.client.get(url)
        self.assertEqual(
            response.status_code, status.HTTP_200_OK, f"URL : [{url}]"
        )
        # uniprot is always present; reviewed/unreviewed only when populated
        for db in ("uniprot", "reviewed", "unreviewed"):
            if db != "uniprot" and db not in response.data["proteins"]:
                continue
            for expected in ("proteins", "taxa"):
                self.assertIn(
                    expected,
                    response.data["proteins"][db],
                    f"'{expected}' should be one of the keys in the response",
                )

    def test_can_get_the_taxonomy_list_on_a_list(self):
        """Protein list results each expose a taxonomy subset."""
        for endpoint in (
            "/api/protein/unreviewed/taxonomy/uniprot",
            "/api/protein/reviewed/taxonomy/uniprot",
            "/api/protein/uniprot/taxonomy/uniprot",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            items = response.data["results"]
            self._check_is_list_of_objects_with_key(items, "metadata")
            self._check_is_list_of_objects_with_key(items, "taxonomy_subset")
            for item in items:
                for taxon in item["taxonomy_subset"]:
                    self._check_taxonomy_from_searcher(taxon)

    def test_can_get_the_taxonomy_list_on_an_object(self):
        """A single protein's taxonomy subset can be listed."""
        for endpoint in (
            "/api/protein/uniprot/A0A0A2L2G2/taxonomy/uniprot",
            "/api/protein/unreviewed/P16582/taxonomy/uniprot/",
            "/api/protein/reviewed/A1CUJ5/taxonomy/uniprot",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            self._check_protein_details(response.data["metadata"])
            self.assertIn("taxonomy_subset", response.data)
            for taxon in response.data["taxonomy_subset"]:
                self._check_taxonomy_from_searcher(taxon)

    def test_can_filter_counter_with_taxonomy_acc(self):
        """Protein counters can be filtered by a specific taxon accession."""
        for endpoint in (
            "/api/protein/taxonomy/uniprot/2579",
            "/api/protein/taxonomy/uniprot/40296",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            self._check_protein_count_overview(response.data)

    def test_can_get_the_taxonomy_object_on_a_list(self):
        """Protein lists can be narrowed to a single taxon."""
        for endpoint in (
            "/api/protein/reviewed/taxonomy/uniprot/2579",
            "/api/protein/uniprot/taxonomy/uniprot/344612",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            items = response.data["results"]
            self._check_is_list_of_objects_with_key(items, "metadata")
            self._check_is_list_of_objects_with_key(items, "taxa")
            for item in items:
                for taxon in item["taxa"]:
                    self._check_taxonomy_from_searcher(taxon)

    def test_can_get_the_taxonomy_object_on_an_object(self):
        """A single protein can be intersected with a single taxon."""
        for endpoint in (
            "/api/protein/uniprot/A0A0A2L2G2/taxonomy/uniprot/40296",
            "/api/protein/unreviewed/P16582/taxonomy/uniprot/40296",
            "/api/protein/reviewed/A1CUJ5/taxonomy/uniprot/2579",
            "/api/protein/reviewed/A1CUJ5/taxonomy/uniprot/344612",
        ):
            response = self.client.get(endpoint)
            self.assertEqual(
                response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]"
            )
            self._check_protein_details(response.data["metadata"])
            self.assertIn("taxa", response.data)
            for taxon in response.data["taxa"]:
                self._check_taxonomy_from_searcher(taxon)
class StructureTaxonomyTest(InterproRESTTestCase):
def test_can_get_the_taxonomy_count(self):
    # /api/structure/taxonomy returns both a taxonomy counter overview and
    # a structure counter overview.
    response = self.client.get("/api/structure/taxonomy")
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self._check_taxonomy_count_overview(response.data)
    self._check_structure_count_overview(response.data)
def test_can_get_the_taxonomy_count_on_a_list(self):
url = "/api/structure/pdb/taxonomy/"
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_is_list_of_objects_with_key(response.data["results"], "metadata")
self._check_is_list_of_objects_with_key(response.data["results"], "taxa")
for result in response.data["results"]:
self._check_taxonomy_count_overview(result)
def test_urls_that_return_structure_with_taxonomy_count(self):
urls = [
"/api/structure/pdb/" + pdb + "/taxonomy/"
for pdb in ["1JM7", "2BKM", "1T2V"]
]
for url in urls:
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_structure_details(response.data["metadata"])
self.assertIn(
"taxa",
response.data,
"'taxa' should be one of the keys in the response",
)
self._check_taxonomy_count_overview(response.data)
def test_can_filter_structure_counter_with_taxonomy_db(self):
url = "/api/structure/taxonomy/uniprot"
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self.assertIn(
"structures",
response.data["structures"]["pdb"],
"'structures' should be one of the keys in the response",
)
self.assertIn(
"taxa",
response.data["structures"]["pdb"],
"'taxa' should be one of the keys in the response",
)
def test_can_get_the_taxonomy_list_on_a_list(self):
url = "/api/structure/pdb/taxonomy/uniprot"
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_is_list_of_objects_with_key(response.data["results"], "metadata")
self._check_is_list_of_objects_with_key(
response.data["results"], "taxonomy_subset"
)
for result in response.data["results"]:
for org in result["taxonomy_subset"]:
self._check_taxonomy_from_searcher(org)
def test_can_get_the_taxonomy_list_on_an_object(self):
urls = [
"/api/structure/pdb/1T2V/taxonomy/uniprot",
"/api/structure/pdb/1JZ8/taxonomy/uniprot",
]
for url in urls:
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_structure_details(response.data["metadata"])
self.assertIn("taxonomy_subset", response.data)
for org in response.data["taxonomy_subset"]:
self._check_taxonomy_from_searcher(org)
def test_can_filter_counter_with_taxonomy_acc(self):
urls = [
"/api/structure/taxonomy/uniprot/2579",
"/api/structure/taxonomy/uniprot/40296",
]
for url in urls:
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_structure_count_overview(response.data)
def test_can_get_the_taxonomy_object_on_a_list(self):
urls = [
"/api/structure/pdb/taxonomy/uniprot/2",
"/api/structure/pdb/taxonomy/uniprot/2579",
]
for url in urls:
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_is_list_of_objects_with_key(
response.data["results"], "metadata"
)
self._check_is_list_of_objects_with_key(response.data["results"], "taxa")
for result in response.data["results"]:
for org in result["taxa"]:
self._check_taxonomy_from_searcher(org)
def test_can_get_the_taxonomy_object_on_an_object(self):
urls = [
"/api/structure/pdb/1T2V/taxonomy/uniprot/40296",
"/api/structure/pdb/1JZ8/taxonomy/uniprot/1",
"/api/structure/pdb/1JZ8/taxonomy/uniprot/40296",
]
for url in urls:
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self._check_structure_details(response.data["metadata"])
self.assertIn("taxa", response.data)
for org in response.data["taxa"]:
self._check_taxonomy_from_searcher(org)
class SetTaxonomyTest(InterproRESTTestCase):
def test_can_get_the_taxonomy_count(self):
    """The combined set/taxonomy endpoint returns both count overviews."""
    response = self.client.get("/api/set/taxonomy")
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    payload = response.data
    self._check_set_count_overview(payload)
    self._check_taxonomy_count_overview(payload)
def test_can_get_the_taxonomy_count_on_a_list(self):
    """Each set in a list response carries a taxonomy count overview."""
    endpoints = [
        "/api/set/pfam/taxonomy",
        # kegg endpoints currently disabled:
        # "/api/set/kegg/taxonomy",
        # "/api/set/kegg/KEGG01/node/taxonomy",
    ]
    for endpoint in endpoints:
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]")
        results = response.data["results"]
        self._check_is_list_of_objects_with_key(results, "metadata")
        self._check_is_list_of_objects_with_key(results, "taxa")
        for entry in results:
            self._check_taxonomy_count_overview(entry)
def test_can_get_the_taxonomy_count_on_a_set(self):
    """A single set object includes its metadata plus a taxonomy count overview."""
    endpoints = [
        "/api/set/pfam/CL0001/taxonomy",
        # kegg endpoints currently disabled:
        # "/api/set/kegg/KEGG01/taxonomy",
        # "/api/set/kegg/KEGG01/node/KEGG01-1/taxonomy",
    ]
    for endpoint in endpoints:
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, status.HTTP_200_OK, f"URL : [{endpoint}]")
        self._check_set_details(response.data["metadata"])
        self.assertIn(
            "taxa",
            response.data,
            "'taxa' should be one of the keys in the response",
        )
        self._check_taxonomy_count_overview(response.data)
def test_can_filter_set_counter_with_structure_db(self):
url = "/api/set/taxonomy/uniprot"
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_200_OK, "URL : [{}]".format(url)
)
self.assertIsInstance(response.data, dict)
# if "kegg" in response.data["sets"]:
# self.assertIn("taxa", response.data["sets"]["kegg"],
# "'taxa' should be one of the keys in the | |
# depthai_demo.py
#!/usr/bin/env python3
import sys
import threading
# Fail fast on Python 2 before any py3-only imports or syntax are reached.
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")

import argparse
import json
import os
import time
import traceback
from functools import cmp_to_key
from itertools import cycle
from pathlib import Path

import platform

# Work around OpenBLAS core auto-detection on ARM64 boards (e.g. NVIDIA Jetson);
# must be set before numpy/OpenBLAS is imported.
if platform.machine() == 'aarch64':  # Jetson
    os.environ['OPENBLAS_CORETYPE'] = "ARMV8"
from depthai_helpers.app_manager import App
# "--app <name>" bypasses the demo entirely: create the app's virtualenv,
# run it, and exit this process.
if __name__ == "__main__":
    if '--app' in sys.argv:
        try:
            # NOTE(review): assumes a value follows '--app'; if it is the last
            # argument this raises an uncaught IndexError — confirm intended.
            app = App(appName=sys.argv[sys.argv.index('--app') + 1])
            app.createVenv()
            app.runApp()
            sys.exit(0)
        except KeyboardInterrupt:
            # Ctrl+C during app setup/run is a normal, clean exit.
            sys.exit(0)
# Import the heavy third-party dependencies with a friendly failure message;
# these are the most common missing pieces on a fresh install.
try:
    import cv2
    import depthai as dai
    import numpy as np
except Exception as ex:
    print("Third party libraries failed to import: {}".format(ex))
    print("Run \"python3 install_requirements.py\" to install dependencies or visit our installation page for more details - https://docs.luxonis.com/projects/api/en/latest/install/")
    # Exit code 42 distinguishes "dependencies missing" from other failures.
    sys.exit(42)
from log_system_information import make_sys_report
from depthai_helpers.supervisor import Supervisor
from depthai_helpers.arg_manager import parseArgs
from depthai_helpers.config_manager import ConfigManager, DEPTHAI_ZOO, DEPTHAI_VIDEOS
from depthai_helpers.metrics import MetricManager
from depthai_helpers.version_check import checkRequirementsVersion
from depthai_sdk import FPSHandler, loadModule, getDeviceInfo, downloadYTVideo, Previews, createBlankFrame
from depthai_sdk.managers import NNetManager, SyncedPreviewManager, PreviewManager, PipelineManager, EncodingManager, BlobManager
# Parse command-line arguments before any GUI/environment decisions are made.
args = parseArgs()

# When running the Qt GUI without the supervisor process, drop Qt plugin/font
# path overrides inherited from the environment so Qt resolves its own.
if args.noSupervisor and args.guiType == "qt":
    if "QT_QPA_PLATFORM_PLUGIN_PATH" in os.environ:
        os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH")
    if "QT_QPA_FONTDIR" in os.environ:
        os.environ.pop("QT_QPA_FONTDIR")

if not args.noSupervisor:
    print('Using depthai module from: ', dai.__file__)
    print('Depthai version installed: ', dai.__version__)

# Skip the requirements version check on ARM boards unless forced.
if not args.skipVersionCheck and platform.machine() not in ['armv6l', 'aarch64']:
    checkRequirementsVersion()
# Optional crash reporting / performance monitoring; the demo keeps working
# if sentry_sdk is missing or fails to initialise.
sentryEnabled = False
try:
    import sentry_sdk
    sentry_sdk.init(
        "https://159e328c631a4d3eb0248c0d92e41db3@o1095304.ingest.sentry.io/6114622",
        # Set traces_sample_rate to 1.0 to capture 100%
        # of transactions for performance monitoring.
        # We recommend adjusting this value in production.
        traces_sample_rate=1.0,
        with_locals=False,
    )
    # Attach an anonymised system report (no USB / package details) to events.
    sentry_sdk.set_context("syslog", make_sys_report(anonymous=True, skipUsb=True, skipPackages=True))
    sentryEnabled = True
except Exception as ex:
    print("Logging and crash reporting disabled! {}".format(ex))
class Trackbars:
    """Keeps OpenCV trackbars that share a name synchronized across windows.

    `instances` maps trackbar name -> {window name -> last known value}, so a
    change made in one window can be propagated to every other window showing
    a trackbar with the same name.
    """
    instances = {}

    @staticmethod
    def createTrackbar(name, window, minVal, maxVal, defaultVal, callback):
        """Create a trackbar in `window` and register it for cross-window syncing.

        The trackbar covers [minVal, maxVal] and starts at defaultVal;
        `callback` fires only when the value actually changes.
        """
        def fn(value):
            if Trackbars.instances[name][window] != value:
                callback(value)
                # Bug fix: also record the new value for the window that fired.
                # The original only updated the *other* windows, leaving this
                # entry stale so the callback re-fired on repeated identical
                # positions.
                Trackbars.instances[name][window] = value
            # Propagate the change to every other window showing this trackbar;
            # updating the stored value first stops the setTrackbarPos-triggered
            # callbacks from recursing.
            for otherWindow, previousValue in Trackbars.instances[name].items():
                if otherWindow != window and previousValue != value:
                    Trackbars.instances[name][otherWindow] = value
                    cv2.setTrackbarPos(name, otherWindow, value)

        cv2.createTrackbar(name, window, minVal, maxVal, fn)
        # Merge with any windows already registered under this trackbar name.
        Trackbars.instances[name] = {**Trackbars.instances.get(name, {}), window: defaultVal}
        cv2.setTrackbarPos(name, window, defaultVal)
noop = lambda *a, **k: None
class Demo:
# Numeric limits for the depth post-processing controls, each overridable via
# an environment variable of the same name. From the names these appear to be
# trackbar ranges for disparity confidence, bilateral sigma and left-right
# check threshold — usage is outside this view, verify against the UI code.
DISP_CONF_MIN = int(os.getenv("DISP_CONF_MIN", 0))
DISP_CONF_MAX = int(os.getenv("DISP_CONF_MAX", 255))
SIGMA_MIN = int(os.getenv("SIGMA_MIN", 0))
SIGMA_MAX = int(os.getenv("SIGMA_MAX", 250))
LRCT_MIN = int(os.getenv("LRCT_MIN", 0))
LRCT_MAX = int(os.getenv("LRCT_MAX", 10))
def run_all(self, conf):
    """Run a standalone app when one was requested, otherwise run the demo itself."""
    if conf.args.app is None:
        self.setup(conf)
        self.run()
        return
    # App mode: set up and launch the named app, driven by the same shouldRun hook.
    app = App(appName=conf.args.app)
    self.onAppSetup(app)
    app.createVenv()
    self.onAppStart(app)
    app.runApp(shouldRun=self.shouldRun)
def __init__(self, displayFrames=True, onNewFrame = noop, onShowFrame = noop, onNn = noop, onReport = noop, onSetup = noop, onTeardown = noop, onIter = noop, onAppSetup = noop, onAppStart = noop, shouldRun = lambda: True, showDownloadProgress=None, collectMetrics=False):
    """Store display settings and life-cycle hooks; every hook defaults to a no-op."""
    self._openvinoVersion = None
    self._displayFrames = displayFrames
    self.toggleMetrics(collectMetrics)
    # Register the event hooks fired at various points of the demo life cycle.
    for hookName, hook in (
        ("onNewFrame", onNewFrame),
        ("onShowFrame", onShowFrame),
        ("onNn", onNn),
        ("onReport", onReport),
        ("onSetup", onSetup),
        ("onTeardown", onTeardown),
        ("onIter", onIter),
    ):
        setattr(self, hookName, hook)
    self.shouldRun = shouldRun
    self.showDownloadProgress = showDownloadProgress
    self.onAppSetup = onAppSetup
    self.onAppStart = onAppStart
def setCallbacks(self, onNewFrame=None, onShowFrame=None, onNn=None, onReport=None, onSetup=None, onTeardown=None, onIter=None, onAppSetup=None, onAppStart=None, shouldRun=None, showDownloadProgress=None):
    """Replace any subset of the registered callbacks.

    Arguments left as None keep the currently registered callback unchanged.
    """
    updates = {
        "onNewFrame": onNewFrame,
        "onShowFrame": onShowFrame,
        "onNn": onNn,
        "onReport": onReport,
        "onSetup": onSetup,
        "onTeardown": onTeardown,
        "onIter": onIter,
        "shouldRun": shouldRun,
        "showDownloadProgress": showDownloadProgress,
        "onAppSetup": onAppSetup,
        "onAppStart": onAppStart,
    }
    for attrName, callback in updates.items():
        if callback is not None:
            setattr(self, attrName, callback)
def toggleMetrics(self, enabled):
    """Enable or disable metric collection; when disabled, self.metrics is None."""
    self.metrics = MetricManager() if enabled else None
def setup(self, conf: ConfigManager):
    """Build the DepthAI pipeline, connect to the device and prepare all managers.

    Must be called before run(). Everything is driven by `conf` (parsed CLI
    arguments plus derived settings).
    """
    print("Setting up demo...")
    self._conf = conf
    self._rgbRes = conf.getRgbResolution()
    self._monoRes = conf.getMonoResolution()
    # Optional explicit OpenVINO version, e.g. "2021_4" -> Version.VERSION_2021_4.
    if self._conf.args.openvinoVersion:
        self._openvinoVersion = getattr(dai.OpenVINO.Version, 'VERSION_' + self._conf.args.openvinoVersion)
    self._deviceInfo = getDeviceInfo(self._conf.args.deviceId)
    # System-report CSV, opened in append mode so repeated runs accumulate.
    if self._conf.args.reportFile:
        reportFileP = Path(self._conf.args.reportFile).with_suffix('.csv')
        reportFileP.parent.mkdir(parents=True, exist_ok=True)
        self._reportFile = reportFileP.open('a')
    self._pm = PipelineManager(openvinoVersion=self._openvinoVersion, lowCapabilities=self._conf.lowCapabilities)
    if self._conf.args.xlinkChunkSize is not None:
        self._pm.setXlinkChunkSize(self._conf.args.xlinkChunkSize)
    # Neural-network manager (blob download + model config) only when a model is in use.
    self._nnManager = None
    if self._conf.useNN:
        self._blobManager = BlobManager(
            zooDir=DEPTHAI_ZOO,
            zooName=self._conf.getModelName(),
            progressFunc=self.showDownloadProgress
        )
        self._nnManager = NNetManager(inputSize=self._conf.inputSize, sync=self._conf.args.sync)
        if self._conf.getModelDir() is not None:
            configPath = self._conf.getModelDir() / Path(self._conf.getModelName()).with_suffix(f".json")
            self._nnManager.readConfig(configPath)
        self._nnManager.countLabel(self._conf.getCountLabel(self._nnManager))
        self._pm.setNnManager(self._nnManager)
    self._device = dai.Device(self._pm.pipeline.getOpenVINOVersion(), self._deviceInfo, usb2Mode=self._conf.args.usbSpeed == "usb2")
    # Best-effort: tag crash reports with the device id; never fail setup over it.
    if sentryEnabled:
        try:
            from sentry_sdk import set_user
            set_user({"mxid": self._device.getMxId()})
        except:
            pass
    if self.metrics is not None:
        self.metrics.reportDevice(self._device)
    if self._deviceInfo.desc.protocol == dai.XLinkProtocol.X_LINK_USB_VSC:
        print("USB Connection speed: {}".format(self._device.getUsbSpeed()))
    # Tune configuration to the capabilities of the connected device.
    self._conf.adjustParamsToDevice(self._device)
    self._conf.adjustPreviewToOptions()
    if self._conf.lowBandwidth:
        self._pm.enableLowBandwidth(poeQuality=self._conf.args.poeQuality)
    # Video-file input is only opened when the camera is not the source.
    self._cap = cv2.VideoCapture(self._conf.args.video) if not self._conf.useCamera else None
    self._fps = FPSHandler() if self._conf.useCamera else FPSHandler(self._cap)
    if self._conf.useCamera:
        # Synced previews keep all camera streams aligned on sequence number.
        pvClass = SyncedPreviewManager if self._conf.args.sync else PreviewManager
        self._pv = pvClass(display=self._conf.args.show, nnSource=self._conf.getModelSource(), colorMap=self._conf.getColorMap(),
                           dispMultiplier=self._conf.dispMultiplier, mouseTracker=True, decode=self._conf.lowBandwidth and not self._conf.lowCapabilities,
                           fpsHandler=self._fps, createWindows=self._displayFrames, depthConfig=self._pm._depthConfig)
        # Create only the camera nodes that are actually enabled/shown.
        if self._conf.leftCameraEnabled:
            self._pm.createLeftCam(self._monoRes, self._conf.args.monoFps,
                                   orientation=self._conf.args.cameraOrientation.get(Previews.left.name),
                                   xout=Previews.left.name in self._conf.args.show)
        if self._conf.rightCameraEnabled:
            self._pm.createRightCam(self._monoRes, self._conf.args.monoFps,
                                    orientation=self._conf.args.cameraOrientation.get(Previews.right.name),
                                    xout=Previews.right.name in self._conf.args.show)
        if self._conf.rgbCameraEnabled:
            self._pm.createColorCam(previewSize=self._conf.previewSize, res=self._rgbRes, fps=self._conf.args.rgbFps,
                                    orientation=self._conf.args.cameraOrientation.get(Previews.color.name),
                                    fullFov=not self._conf.args.disableFullFovNn,
                                    xout=Previews.color.name in self._conf.args.show)
        if self._conf.useDepth:
            # Depth/disparity outputs are only wired up for the previews requested.
            self._pm.createDepth(
                self._conf.args.disparityConfidenceThreshold,
                self._conf.getMedianFilter(),
                self._conf.args.sigma,
                self._conf.args.stereoLrCheck,
                self._conf.args.lrcThreshold,
                self._conf.args.extendedDisparity,
                self._conf.args.subpixel,
                useDepth=Previews.depth.name in self._conf.args.show or Previews.depthRaw.name in self._conf.args.show,
                useDisparity=Previews.disparity.name in self._conf.args.show or Previews.disparityColor.name in self._conf.args.show,
                useRectifiedLeft=Previews.rectifiedLeft.name in self._conf.args.show,
                useRectifiedRight=Previews.rectifiedRight.name in self._conf.args.show,
            )
        self._encManager = None
        if len(self._conf.args.encode) > 0:
            self._encManager = EncodingManager(self._conf.args.encode, self._conf.args.encodeOutput)
            self._encManager.createEncoders(self._pm)
    if len(self._conf.args.report) > 0:
        self._pm.createSystemLogger()
    if self._conf.useNN:
        # Build the NN node last, once its blob and inputs are known.
        self._nn = self._nnManager.createNN(
            pipeline=self._pm.pipeline, nodes=self._pm.nodes, source=self._conf.getModelSource(),
            blobPath=self._blobManager.getBlob(shaves=self._conf.shaves, openvinoVersion=self._nnManager.openvinoVersion),
            useDepth=self._conf.useDepth, minDepth=self._conf.args.minDepth, maxDepth=self._conf.args.maxDepth,
            sbbScaleFactor=self._conf.args.sbbScaleFactor, fullFov=not self._conf.args.disableFullFovNn,
        )
        self._pm.addNn(nn=self._nn, xoutNnInput=Previews.nnInput.name in self._conf.args.show,
                       xoutSbb=self._conf.args.spatialBoundingBox and self._conf.useDepth)
def run(self):
    """Start the pipeline on the device and drive the main loop until stopped.

    Always calls stop() on the way out, whether the loop ends normally
    (StopIteration), by exception, or via the shouldRun hook.
    """
    self._device.startPipeline(self._pm.pipeline)
    self._pm.createDefaultQueues(self._device)
    if self._conf.useNN:
        self._nnManager.createQueues(self._device)
    # Optional output queues, present only when the matching feature is enabled.
    self._sbbOut = self._device.getOutputQueue("sbb", maxSize=1, blocking=False) if self._conf.useNN and self._conf.args.spatialBoundingBox else None
    self._logOut = self._device.getOutputQueue("systemLogger", maxSize=30, blocking=False) if len(self._conf.args.report) > 0 else None
    if self._conf.useDepth:
        # Infinite cycle over the available median-filter modes; advanced at runtime.
        self._medianFilters = cycle([item for name, item in vars(dai.MedianFilter).items() if name.startswith('KERNEL_') or name.startswith('MEDIAN_')])
        for medFilter in self._medianFilters:
            # move the cycle to the current median filter
            if medFilter == self._pm._depthConfig.postProcessing.median:
                break
    else:
        self._medianFilters = []
    if self._conf.useCamera:
        cameras = self._device.getConnectedCameras()
        if dai.CameraBoardSocket.LEFT in cameras and dai.CameraBoardSocket.RIGHT in cameras:
            self._pv.collectCalibData(self._device)
        # Initial camera controls from the CLI; a value of None means "leave default".
        self._cameraConfig = {
            "exposure": self._conf.args.cameraExposure,
            "sensitivity": self._conf.args.cameraSensitivity,
            "saturation": self._conf.args.cameraSaturation,
            "contrast": self._conf.args.cameraContrast,
            "brightness": self._conf.args.cameraBrightness,
            "sharpness": self._conf.args.cameraSharpness
        }
        if any(self._cameraConfig.values()):
            self._updateCameraConfigs()
        self._pv.createQueues(self._device, self._createQueueCallback)
        if self._encManager is not None:
            self._encManager.createDefaultQueues(self._device)
    # Per-run loop state.
    self._seqNum = 0
    self._hostFrame = None
    self._nnData = []
    self._sbbRois = []
    self.onSetup(self)
    try:
        while self.shouldRun():
            self._fps.nextIter()
            self.onIter(self)
            self.loop()
    except StopIteration:
        # Raised by loop() to signal a clean end (e.g. video finished, 'q' pressed).
        pass
    except Exception as ex:
        # Report crashes when Sentry is enabled, then re-raise so failures stay loud.
        if sentryEnabled:
            from sentry_sdk import capture_exception
            capture_exception(ex)
        raise
    finally:
        self.stop()
def stop(self):
    """Close the device and every open queue, print FPS stats, fire the teardown hook.

    Called from run()'s finally block, so it must tolerate partially
    initialised state only to the extent the None checks below provide.
    """
    print("Stopping demo...")
    # Close and drop the device first so the XLink connection is released.
    self._device.close()
    del self._device
    self._pm.closeDefaultQueues()
    if self._conf.useCamera:
        self._pv.closeQueues()
    if self._encManager is not None:
        self._encManager.close()
    if self._nnManager is not None:
        self._nnManager.closeQueues()
    if self._sbbOut is not None:
        self._sbbOut.close()
    if self._logOut is not None:
        self._logOut.close()
    self._fps.printStatus()
    self.onTeardown(self)
def loop(self):
if self._conf.useCamera:
self._pv.prepareFrames(callback=self.onNewFrame)
if self._encManager is not None:
self._encManager.parseQueues()
if self._sbbOut is not None:
sbb = self._sbbOut.tryGet()
if sbb is not None:
self._sbbRois = sbb.getConfigData()
depthFrames = [self._pv.get(Previews.depthRaw.name), self._pv.get(Previews.depth.name)]
for depthFrame in depthFrames:
if depthFrame is None:
continue
for roiData in self._sbbRois:
roi = roiData.roi.denormalize(depthFrame.shape[1], depthFrame.shape[0])
topLeft = roi.topLeft()
bottomRight = roi.bottomRight()
# Display SBB on the disparity map
cv2.rectangle(depthFrame, (int(topLeft.x), int(topLeft.y)), (int(bottomRight.x), int(bottomRight.y)), self._nnManager._bboxColors[0], 2)
else:
readCorrectly, rawHostFrame = self._cap.read()
if not readCorrectly:
raise StopIteration()
self._nnManager.sendInputFrame(rawHostFrame, self._seqNum)
self._seqNum += 1
self._hostFrame = rawHostFrame
self._fps.tick('host')
if self._nnManager is not None:
newData, inNn = self._nnManager.parse()
if inNn is not None:
self.onNn(inNn, newData)
self._fps.tick('nn')
if newData is not None:
self._nnData = newData
if self._conf.useCamera:
if self._nnManager is not None:
self._nnManager.draw(self._pv, self._nnData)
self._pv.showFrames(callback=self._showFramesCallback)
elif self._hostFrame is not None:
debugHostFrame = self._hostFrame.copy()
if self._nnManager is not None:
self._nnManager.draw(debugHostFrame, self._nnData)
self._fps.drawFps(debugHostFrame, "host")
if self._displayFrames:
cv2.imshow("host", debugHostFrame)
if self._logOut:
logs = self._logOut.tryGetAll()
for log in logs:
self._printSysInfo(log)
if self._displayFrames:
key = cv2.waitKey(1)
if key == ord('q'):
raise StopIteration()
elif key == ord('m'):
nextFilter = next(self._medianFilters)
self._pm.updateDepthConfig(self._device, median=nextFilter)
if self._conf.args.cameraControlls:
update = True
if key == ord('t'):
self._cameraConfig["exposure"] = 10000 if self._cameraConfig["exposure"] is None else 500 if self._cameraConfig["exposure"] == 1 else min(self._cameraConfig["exposure"] + 500, 33000)
if self._cameraConfig["sensitivity"] is None:
self._cameraConfig["sensitivity"] = 800
elif key == ord('g'):
self._cameraConfig["exposure"] = 10000 if self._cameraConfig["exposure"] is None else max(self._cameraConfig["exposure"] - 500, 1)
if self._cameraConfig["sensitivity"] is None:
self._cameraConfig["sensitivity"] = 800
elif key == ord('y'):
self._cameraConfig["sensitivity"] = 800 if self._cameraConfig["sensitivity"] is None else min(self._cameraConfig["sensitivity"] + 50, 1600)
if self._cameraConfig["exposure"] is None:
self._cameraConfig["exposure"] = 10000
elif key == ord('h'):
self._cameraConfig["sensitivity"] = 800 if self._cameraConfig["sensitivity"] is None else max(self._cameraConfig["sensitivity"] - 50, 100)
if self._cameraConfig["exposure"] is None:
self._cameraConfig["exposure"] = 10000
elif key == ord('u'):
self._cameraConfig["saturation"] = 0 if self._cameraConfig["saturation"] is None else min(self._cameraConfig["saturation"] + 1, 10)
elif key == ord('j'):
self._cameraConfig["saturation"] = 0 if self._cameraConfig["saturation"] is None else max(self._cameraConfig["saturation"] - 1, -10)
elif key == ord('i'):
self._cameraConfig["contrast"] = 0 if self._cameraConfig["contrast"] is None else min(self._cameraConfig["contrast"] + 1, 10)
elif key == ord('k'):
self._cameraConfig["contrast"] = 0 if self._cameraConfig["contrast"] is None else max(self._cameraConfig["contrast"] - 1, -10)
elif key == ord('o'):
self._cameraConfig["brightness"] | |
contract values(5,40856194.45, 'p004')");
db.execute("insert into contract values(1,2834164.67, 'p005')");
db.execute("insert into contract values(2,5892143.22, 'p006')");
db.execute("insert into contract values(3,12783547.98, 'p007')");
db.execute("insert into contract values(6,76923123.65, 'p008')");
db.execute("insert into contract values(6,65897122.34, 'p009')");
db.execute("insert into contract values(3,13987652.78, 'p010')");
db.execute("insert into contract values(4,24678234.65, 'p011')");
db.execute("insert into contract values(2,458211.98, 'p012')");
db.execute("insert into contract values(3,12875123.67, 'p013')");
db.execute("insert into contract values(4,24132987.54, 'p014')");
db.execute("insert into contract values(6,60982341.43, 'p015')");
db.execute("insert into contract values(1,3567982.12, 'p016')");
db.execute("insert into contract values(3,13986342.87, 'p017')");
db.execute("insert into contract values(2,3456432.11, 'p018')");
db.execute("insert into contract values(1,1543678.42, 'p019')");
db.execute("insert into contract values(4,28767895.93, 'p020')");
db.execute("insert into contract values(1,1872343.34, 'p021')");
db.execute("insert into contract values(2,6234984.54, 'p022')");
db.execute("insert into contract values(4,3874351.73, 'p023')");
db.execute("insert into contract values(3,2873115.91, 'p024')");
db.execute("insert into contract values(1,3893478.22, 'p025')");
db.execute("insert into contract values(2,12457834.98, 'p026')");
db.execute("insert into contract values(4,28335910.43, 'p027')");
db.execute("insert into contract values(1,2384973.11, 'p028')");
db.execute("insert into contract values(3,21847384.44, 'p029')");
db.execute("insert into contract values(5,50327842.31, 'p030')");
db.execute("insert into contract values(5,45348729.48, 'p031')");
db.execute("insert into contract values(3,28761945.52, 'p032')");
db.execute("insert into contract values(4,38737895.56, 'p033')");
db.execute("insert into contract values(2,18767534.33, 'p034')");
db.execute("insert into contract values(1,6239841.87, 'p035')");
db.execute("insert into contract values(2,15437854.88, 'p036')");
db.execute("insert into contract values(3,30184766.24, 'p037')");
db.execute("insert into contract values(1,8767658.44, 'p038')");
db.execute("insert into contract values(4,29769895.01, 'p039')");
db.execute("insert into contract values(3,28767887.56, 'p040')");
db.execute("insert into contract values(6,72555895.04, 'p041')");
db.execute("insert into contract values(2,16884139.55, 'p042')");
db.execute("insert into contract values(6,60456339.75, 'p043')");
db.execute("insert into contract values(1,2443985.81, 'p044')");
db.execute("insert into contract values(4,29223895.23, 'p045')");
db.execute("insert into contract values(6,92769985.08, 'p046')");
db.execute("insert into contract values(1,2983728.44, 'p047')");
db.execute("insert into contract values(2,16887320.87, 'p048')");
db.execute("insert into contract values(3,24887992.90, 'p049')");
db.execute("insert into contract values(4,36784637.43, 'p050')");
db.execute("insert into contract values(6,82833194.56, 'p051')");
db.execute("insert into contract values(3,6756942.92, 'p052')");
db.execute("insert into contract values(2,7562880.13, 'p053')");
db.execute("insert into contract values(5,40857194.45, 'p054')");
db.execute("insert into contract values(1,2834664.67, 'p055')");
db.execute("insert into contract values(2,5892543.22, 'p056')");
db.execute("insert into contract values(3,12783547.98, 'p057')");
db.execute("insert into contract values(6,76922123.65, 'p058')");
db.execute("insert into contract values(6,65891122.34, 'p059')");
db.execute("insert into contract values(3,13982652.78, 'p060')");
db.execute("insert into contract values(4,24673234.65, 'p061')");
db.execute("insert into contract values(2,458261.98, 'p062')");
db.execute("insert into contract values(3,12876123.67, 'p063')");
db.execute("insert into contract values(4,24135987.54, 'p064')");
db.execute("insert into contract values(6,60984341.43, 'p065')");
db.execute("insert into contract values(1,3567982.12, 'p066')");
db.execute("insert into contract values(3,13981342.87, 'p067')");
db.execute("insert into contract values(2,3456432.11, 'p068')");
db.execute("insert into contract values(1,1543378.42, 'p069')");
db.execute("insert into contract values(4,28767895.93, 'p070')");
db.execute("insert into contract values(1,1872343.34, 'p071')");
db.execute("insert into contract values(2,6234984.54, 'p072')");
db.execute("insert into contract values(4,3874351.73, 'p073')");
db.execute("insert into contract values(3,2873115.91, 'p074')");
db.execute("insert into contract values(1,3893478.22, 'p075')");
db.execute("insert into contract values(2,12457834.98, 'p076')");
db.execute("insert into contract values(4,28335910.43, 'p077')");
db.execute("insert into contract values(1,2384973.11, 'p078')");
db.execute("insert into contract values(5,50337842.31, 'p080')");
db.execute("insert into contract values(5,45328729.48, 'p081')");
db.execute("insert into contract values(3,28713945.52, 'p082')");
db.execute("insert into contract values(4,38757895.56, 'p083')");
db.execute("insert into contract values(2,18737534.33, 'p084')");
db.execute("insert into contract values(1,6237841.87, 'p085')");
db.execute("insert into contract values(2,15432854.88, 'p086')");
db.execute("insert into contract values(3,30124766.24, 'p087')");
db.execute("insert into contract values(1,8267658.44, 'p088')");
db.execute("insert into contract values(4,22769895.01, 'p089')");
db.execute("insert into contract values(3,22767887.56, 'p090')");
db.execute("insert into contract values(6,72155895.04, 'p091')");
db.execute("insert into contract values(2,16864139.55, 'p092')");
db.execute("insert into contract values(6,60476339.75, 'p093')");
db.execute("insert into contract values(1,2448985.81, 'p094')");
db.execute("insert into contract values(4,29293895.23, 'p095')");
db.execute("insert into contract values(6,92719985.08, 'p096')");
db.execute("insert into contract values(1,2982728.44, 'p097')");
db.execute("insert into contract values(2,16837320.87, 'p098')");
db.execute("insert into contract values(3,24857992.90, 'p099')");
db.execute("insert into contract values(4,36764637.43, 'p100')");
db.execute("insert into hall_of_fame values(1996, 2, 648, 'p046')");
db.execute("insert into hall_of_fame values(1996, 2, 211, 'p047')");
db.execute("insert into hall_of_fame values(1997, 1, 315, 'p050')");
db.execute("insert into hall_of_fame values(1998, 1, 367, 'p048')");
db.execute("insert into hall_of_fame values(1999, 3, 312, 'p049')");
db.execute("insert into hall_of_fame values(1999, 3, 293, 'p035')");
db.execute("insert into hall_of_fame values(1999, 3, 215, 'p041')");
db.execute("insert into hall_of_fame values(2000, 1, 196, 'p039')");
db.execute("insert into hall_of_fame values(2001, 2, 204, 'p003')");
db.execute("insert into hall_of_fame values(2001, 2, 209, 'p029')");
db.execute("insert into hall_of_fame values(2002, 1, 394, 'p026')");
db.execute("insert into hall_of_fame values(2003, 1, 495, 'p009')");
db.execute("insert into hall_of_fame values(2004, 2, 531, 'p042')");
db.execute("insert into hall_of_fame values(2004, 2, 458, 'p025')");
db.execute("insert into hall_of_fame values(2005, 1, 286, 'p007')");
db.execute("insert into hall_of_fame values(2006, 3, 476, 'p014')");
db.execute("insert into hall_of_fame values(2006, 3, 541, 'p034')");
db.execute("insert into hall_of_fame values(2006, 3, 477, 'p016')");
db.execute("insert into hall_of_fame values(2007, 2, 593, 'p001')");
db.execute("insert into hall_of_fame values(2007, 2, 308, 'p037')");
db.execute("insert into position values('SG','p001')");
db.execute("insert into position values('PG','p002')");
db.execute("insert into position values('PF','p003')");
db.execute("insert into position values('SF','p004')");
db.execute("insert into position values('C','p005')");
db.execute("insert into position values('SG','p006')");
db.execute("insert into position values('PF','p007')");
db.execute("insert into position values('PG','p008')");
db.execute("insert into position values('SG','p009')");
db.execute("insert into position values('PG','p010')");
db.execute("insert into position values('SG','p011')");
db.execute("insert into position values('SF','p012')");
db.execute("insert into position values('C','p013')");
db.execute("insert into position values('PG','p014')");
db.execute("insert into position values('SF','p015')");
db.execute("insert into position values('PF','p016')");
db.execute("insert into position values('SG','p017')");
db.execute("insert into position values('C','p018')");
db.execute("insert into position values('PG','p019')");
db.execute("insert into position values('SG','p020')");
db.execute("insert into all_star_team values('1997','East','<NAME>')");
db.execute("insert into all_star_team values('1998','West','<NAME>')");
db.execute("insert into all_star_team values('1999','West','Shaq O Neal')");
db.execute("insert into all_star_team values('2000','West','<NAME>')");
db.execute("insert into all_star_team values('2001','West','<NAME>')");
db.execute("insert into all_star_team values('2002','West','<NAME>')");
db.execute("insert into all_star_team values('2003','West','<NAME>')");
db.execute("insert into all_star_team values('2004','West','Shaq O Neal')");
db.execute("insert into all_star_team values('2005','East','<NAME>')");
db.execute("insert into all_star_team values('2006','West','<NAME>')");
db.execute("insert into all_star_team values('2007','East','<NAME>')");
db.execute("insert into all_star_team values('2008','East','<NAME>')");
db.execute("insert into all_star_team values('2009','East','<NAME>')");
db.execute("insert into all_star_team values('2010','West','Kobe Bryant')");
db.execute("insert into all_star_team values('2011','West','<NAME>')");
db.execute("insert into all_star_team values('2012','West','<NAME>')");
db.execute("insert into all_star_team values('2013','East','<NAME>')");
db.execute("insert into all_star_team values('2014','West','<NAME>')");
db.execute("insert into all_star_team values('2015','East','<NAME>')");
db.execute("insert into all_star_team values('2016','West','<NAME>')");
db.execute("insert into all_star_team values('2017','East','<NAME>')");
# Seed data: voted_for rows (player_id, all-star year).
db.execute("insert into voted_for values('p046','1997')");
db.execute("insert into voted_for values('p046','1998')");
db.execute("insert into voted_for values('p001','1998')");
db.execute("insert into voted_for values('p001','1999')");
db.execute("insert into voted_for values('p001','2000')");
db.execute("insert into voted_for values('p001','2001')");
db.execute("insert into voted_for values('p001','2002')");
db.execute("insert into voted_for values('p001','2003')");
db.execute("insert into voted_for values('p001','2004')");
db.execute("insert into voted_for values('p001','2005')");
db.execute("insert into voted_for values('p001','2006')");
db.execute("insert into voted_for values('p001','2007')");
db.execute("insert into voted_for values('p001','2008')");
db.execute("insert into voted_for values('p001','2009')");
db.execute("insert into voted_for values('p001','2010')");
db.execute("insert into voted_for values('p001','2011')");
db.execute("insert into voted_for values('p001','2012')");
db.execute("insert into voted_for values('p001','2013')");
db.execute("insert into voted_for values('p001','2014')");
db.execute("insert into voted_for values('p001','2015')");
db.execute("insert into voted_for values('p001','2016')");
db.execute("insert into voted_for values('p016','1998')");
db.execute("insert into voted_for values('p016','1999')");
db.execute("insert into voted_for values('p016','2000')");
db.execute("insert into voted_for values('p016','2001')");
db.execute("insert into voted_for values('p016','2002')");
db.execute("insert into voted_for values('p016','2003')");
db.execute("insert into voted_for values('p016','2004')");
db.execute("insert into voted_for values('p016','2005')");
db.execute("insert into voted_for values('p016','2006')");
db.execute("insert into voted_for values('p016','2007')");
db.execute("insert into voted_for values('p016','2008')");
db.execute("insert into voted_for values('p016','2009')");
db.execute("insert into voted_for values('p016','2010')");
db.execute("insert into voted_for values('p016','2011')");
db.execute("insert into voted_for values('p016','2012')");
db.execute("insert into voted_for values('p016','2013')");
db.execute("insert into voted_for values('p016','2014')");
db.execute("insert into voted_for values('p016','2015')");
db.execute("insert into voted_for values('p016','2016')");
db.execute("insert into voted_for values('p020','2017')");
db.execute("insert into voted_for values('p011','2017')");
db.execute("insert into voted_for values('p027','2017')");
db.execute("insert into voted_for values('p028','2017')");
db.execute("insert into voted_for values('p026','2017')");
db.execute("insert into voted_for values('p007','2017')");
# Seed data: sponsor rows, one per team id (e01-e10, w01-w10).
db.execute("insert into sponsor values('Nike','Nike Media','Air Jordan','Air Fit','Nike Equipment','e01')");
db.execute("insert into sponsor values('Adidas','Adidas Media','P90x','Clima cool','Adi Train','e02')");
db.execute("insert into sponsor values('Puma','Puma Media','Blaze','Drifit','Puma Tech','e03')");
db.execute("insert into sponsor values('Under Armor','Armor Media','Ziko 20','Body Armor','Pro Tech','e04')");
db.execute("insert into sponsor values('New Balance','Balance Media','Gel Cumulus','Perspire','Balancequip','e05')");
db.execute("insert into sponsor values('Bwin','Bw Media','Nimbus','Clean','Win Training','e06')");
db.execute("insert into sponsor values('Qatar Airlines','Qatar Media','Inferno','Sense','Training Pro','e07')");
db.execute("insert into sponsor values('American Insurance','American Media','Cloud','Dew','TechX','e08')");
db.execute("insert into sponsor values('Slazenger','Slaz Media','Venom','Skin Z','SlazTech','e09')");
db.execute("insert into sponsor values('Bank of America','North Media','Kraken','Layer','American Equipment','e10')");
db.execute("insert into sponsor values('Shell','Shell Media','Power','Boost','Power Tech','w01')");
db.execute("insert into sponsor values('Canondale','CD Media','Hydro','Gravitas','Canon Equipment','w02')");
db.execute("insert into sponsor values('The North Face','Public Face','Float','Levitate','NorTech','w03')");
db.execute("insert into sponsor values('Walmart','Wal Media','Chi','X Touch','Equipment Z','w04')");
db.execute("insert into sponsor values('Target','Target Media','Energy','Mutate','Sense Equip','w05')");
db.execute("insert into sponsor values('Wells Fargo','Fargo Media','Chimera','Spear','Wellness Tech','w06')");
db.execute("insert into sponsor values('Mervyns','Merv Media','Katana','Blade','Merv Tech','w07')");
db.execute("insert into sponsor values('Best Buy','BB Media','Claw','Fang','Health Equipment','w08')");
db.execute("insert into sponsor values('CBS','CBS Media','GenX','Protect X','ProTrain','w09')");
db.execute("insert into sponsor values('KIA','KIA Media','Scimitar','Tyranitar','Train Max','w10')");
# Seed data: previous_team rows (old team, current team, player, from-date, to-date).
db.execute("insert into previous_team values('e03','e01','p001','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e01','e01','p002','2007-11-21','2010-11-21')");
db.execute("insert into previous_team values('e02','e01','p003','2010-11-21','2014-11-21')");
db.execute("insert into previous_team values('e03','w01','p004','2003-11-23','2004-11-21')");
db.execute("insert into previous_team values('e01','w01','p005','2004-11-21','2005-11-21')");
db.execute("insert into previous_team values('e05','w01','p006','2005-11-21','2015-11-21')");
db.execute("insert into previous_team values('w01','w01','p007','2015-11-21','2016-11-21')");
db.execute("insert into previous_team values('e03','w01','p008','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e01','w01','p009','2007-11-21','2008-11-21')");
db.execute("insert into previous_team values('w05','w01','p010','2008-11-21','2014-11-21')");
db.execute("insert into previous_team values('e07','w01','p011','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e02','w01','p012','2007-11-21','2008-11-21')");
db.execute("insert into previous_team values('e05','w01','p013','2008-11-21','2009-11-21')");
db.execute("insert into previous_team values('w03','w01','p014','2009-11-21','2010-11-21')");
db.execute("insert into previous_team values('w07','w05','p015','2001-11-21','2003-11-21')");
db.execute("insert into previous_team values('e04','w05','p016','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e05','w05','p017','2007-11-21','2013-11-21')");
db.execute("insert into previous_team values('w01','w05','p018','2013-11-21','2015-11-21')");
db.execute("insert into previous_team values('e01','w05','p019','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e03','e05','p037','2003-11-21','2016-11-21')");
# Seed data: games rows (game id, date, venue).
db.execute("insert into games values('g01','2016-03-16','At&t Center')");
db.execute("insert into games values('g02','2016-03-21','Oracle Arena')");
db.execute("insert into games values('g03','2016-04-07','Quicken Loans Arena')");
db.execute("insert into games values('g04','2016-04-19','Staples Center (A)')");
db.execute("insert into games values('g05','2016-05-23','Staples Center (B)')");
db.execute("insert into games values('g06','2016-05-27','Toyota Center')");
db.execute("insert into games values('g07','2016-06-12','Amway Center')");
db.execute("insert into games values('g08','2016-06-17','United Center')");
db.execute("insert into games values('g09','2016-07-01','Energy Solutions Arena')");
db.execute("insert into games values('g10','2016-07-31','Quicken Loans Arena')");
db.execute("insert into games values('g11','2016-08-22','United Center')");
db.execute("insert into games values('g12','2016-08-29','United Center')");
db.execute("insert into games values('g13','2016-09-08','Oracle Arena')");
db.execute("insert into games values('g14','2016-09-29','At&t Center')");
db.execute("insert into games values('g15','2016-10-16','Staples Center (B)')");
db.execute("insert into games values('g16','2016-10-24','TD Center')");
db.execute("insert into games values('g17','2016-11-04','United Center')");
db.execute("insert into games values('g18','2016-11-14','TD Center')");
db.execute("insert into games values('g19','2016-12-20','Staples Center (A)')");
db.execute("insert into games values('g20','2016-12-25','Toyota Center')");
# Seed data: per-game player stat lines (player, game, then five numeric stats).
db.execute("insert into performances values('p020','g01',12,9,55.3,7,3)");
db.execute("insert into performances values('p016','g01',14,4,79.7,9,5)");
db.execute("insert into performances values('p051','g01',6,8,67.2,3,0)");
db.execute("insert into performances values('p052','g01',9,7,71.4,4,1)");
db.execute("insert into performances values('p051','g02',7,10,68.6,5,1)");
db.execute("insert into performances values('p052','g02',10,7,53.9,4,2)");
db.execute("insert into performances values('p026','g02',12,6,75.2,7,1)");
db.execute("insert into performances values('p027','g02',11,8,57.9,6,1)");
db.execute("insert into performances values('p027','g03',8,5,69.6,6,4)");
db.execute("insert into performances values('p026','g03',6,4,51.4,5,6)");
db.execute("insert into performances values('p020','g04',9,10,63.8,9,2)");
db.execute("insert into performances values('p008','g05',13,9,72.7,6,1)");
db.execute("insert into performances values('p035','g06',14,6,78.1,4,5)");
db.execute("insert into performances values('p011','g07',12,5,60.0,8,3)");
db.execute("insert into performances values('p046','g08',8,10,64.6,5,1)");
db.execute("insert into performances values('p001','g09',6,9,59.2,7,6)");
db.execute("insert into performances values('p051','g10',7,4,65.6,8,4)");
db.execute("insert into performances values('p046','g11',10,7,53.6,9,2)");
db.execute("insert into performances values('p049','g12',9,5,76.1,4,6)");
db.execute("insert into performances values('p011','g13',14,6,68.8,4,5)");
# Seed data: take_part_in rows (two team ids and the game they played).
db.execute("insert into take_part_in values('w07','w04','g01')");
db.execute("insert into take_part_in values('e01','w07','g02')");
db.execute("insert into take_part_in values('w01','e01','g03')");
db.execute("insert into take_part_in values('w04','w01','g04')");
db.execute("insert into take_part_in values('e03','w02','g05')");
db.execute("insert into take_part_in values('e02','w03','g06')");
db.execute("insert into take_part_in values('w03','e04','g07')");
db.execute("insert into take_part_in values('e04','e05','g08')");
db.execute("insert into take_part_in values('w01','w05','g09')");
db.execute("insert into take_part_in values('w07','e03','g10')");
db.execute("insert into take_part_in values('w01','e05','g11')");
db.execute("insert into take_part_in values('e03','e05','g12')");
db.execute("insert into take_part_in values('w03','w07','g13')");
db.execute("insert into take_part_in values('e03','w04','g14')");
db.execute("insert into take_part_in values('e01','w02','g15')");
db.execute("insert into take_part_in values('w03','e02','g16')");
db.execute("insert into take_part_in values('w07','e05','g17')");
db.execute("insert into take_part_in values('e04','e02','g18')");
db.execute("insert into take_part_in values('e05','w01','g19')");
db.execute("insert into take_part_in values('e01','w03','g20')");
# Seed data: coach rows (coach id, first name, last name, team id).
db.execute("insert into coach values('c01','Tyronn','Lue','e01')");
db.execute("insert into coach values('c02','Brad','Stevens','e02')");
db.execute("insert into coach values('c03','Eric','Spoelstra','e03')");
db.execute("insert into coach values('c04','Frank','Vogel','e04')");
db.execute("insert into coach values('c05','Fred','Hoiberg','e05')");
db.execute("insert into coach values('c06','Jason','Kidd','e06')");
db.execute("insert into coach values('c07','Jeff','Hornacek','e07')");
db.execute("insert into coach values('c08','Nate','McMillan','e08')");
db.execute("insert into coach values('c09','Brett','Brown','e09')");
db.execute("insert into coach values('c10','Stan','<NAME>','e10')");
db.execute("insert into coach values('c11','Luke','Walton','w01')");
db.execute("insert into coach values('c12','Doc','Rivers','w02')");
db.execute("insert into coach values('c13','Mike','D`Antoni','w03')");
db.execute("insert into coach values('c14','Gregg','Popovich','w04')");
db.execute("insert into coach values('c15','Quin','Snyder','w05')");
db.execute("insert into coach values('c16','Michael','Malone','w06')");
db.execute("insert into coach values('c17','Steve','Kerr','w07')");
db.execute("insert into coach values('c18','Billy','Donovan','w08')");
db.execute("insert into coach values('c19','Terry','Stotts','w09')");
db.execute("insert into coach values('c20','Tom','Thibodeau','w10')");
# Seed data: management rows (three executive names, coach first/last name, team id).
# NOTE(review): the whitespace around commas is irregular but harmless inside the
# SQL string literals; kept byte-identical.
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Luke','Walton','w01')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Doc','Rivers', 'w02')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Mike' ,'D`Antoni', 'w03')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Gregg','Popovich', 'w04')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Quin','Snyder' , 'w05')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Michael' ,'Malone' , 'w06')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Steve','Kerr', 'w07')");
db.execute("insert into management values('<NAME>','K.Larry','<NAME>','Billy' ,'Donovan', 'w08')");
db.execute("insert into management values('<NAME>','<NAME>.','<NAME>','Terry','Stotts', 'w09')");
db.execute("insert into management values('<NAME>','T.Lewis','<NAME>','Tom' ,'Thibodeau' , 'w10')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>' ,'Tyronn' ,'Lue', 'e01')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>', 'Brad' ,'Stevens','e02')");
db.execute("insert into management values('<NAME>','<NAME>.','<NAME>','Erik' ,'Spoelstra' , 'e03')");
db.execute("insert into management values('<NAME>','S.Judy','<NAME>','Frank','Vogel' , 'e04')");
db.execute("insert into management | |
<filename>components/faker/faker/providers/person/de_DE/__init__.py
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}-{{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}-{{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
'{{first_name_male}} {{last_name}} {{suffix}}',
'{{first_name_female}} {{last_name}} {{suffix}}',
'{{prefix_male}} {{first_name_male}} {{last_name}} {{suffix}}',
'{{prefix_female}} {{first_name_female}} {{last_name}} {{suffix}}',
)
first_names_male = (
'Abel', 'Abraham', 'Adalbero', 'Adam', 'Adamo', 'Adolfo', 'Adrian',
'Adriano', 'Adrianus', 'Adrien', 'Alain', 'Alajos', 'Alan', 'Albain',
'Alban', 'Albano', 'Alberto', 'Albin', 'Alec', 'Alejandro',
'Alessandro', 'Alessio', 'Alex', 'Alexander', 'Alexandre',
'Alexandros', 'Alexej', 'Alexis', 'Alfons', 'Alfonso', 'Aljoscha',
'Allan', 'Allen', 'Alois', 'Alon', 'Alonzo', 'Alphonse', 'Alwin',
'Amadeo', 'Amadeus', 'Amandus', 'Amos', 'Anatol', 'Anatole',
'Anatolij', 'Anders', 'Andi', 'Andor', 'Andre', 'Andreas', 'Andrej',
'Andrew', 'Andrijan', 'Andy', 'Angelus', 'Ansgar', 'Anthony',
'Antoine', 'Anton', 'Antonio', 'Araldo', 'Aram', 'Argus', 'Arjan',
'Armin', 'Arminio', 'Arnaldo', 'Arnault', 'Arndt', 'Arne', 'Arno',
'Arnold', 'Arrigo', 'Art', 'Arthur', 'Artur', 'Arturo', 'August',
'Auguste', 'Augustin', 'Aurelius', 'Axel', 'Balduin', 'Balthasar',
'Bardo', 'Barnabas', 'Barnard', 'Barney', 'Baruch', 'Basil',
'Basilius', 'Bastian', 'Bastien', 'Battista', 'Beatus', 'Beltrame',
'Beltran', 'Ben', 'Benedetto', 'Benedict', 'Benedikt', 'Bengt',
'Beniamino', 'Benignus', 'Benito', 'Benjamin', 'Benjy', 'Bennett',
'Benno', 'Benny', 'Benoit', 'Beppe', 'Bernard', 'Bernardo', 'Bernd',
'Bernhard', 'Bernie', 'Bert', 'Berthold', 'Bertoldo', 'Bertram',
'Bertrame', 'Bill', 'Billy', 'Birger', 'Bjarne', 'Björn', 'Bob',
'Bobby', 'Bodo', 'Bonifatius', 'Boris', 'Bosco', 'Brendan', 'Brian',
'Bruno', 'Bryan', 'Burkhard', 'Camillo', 'Camilo', 'Carl', 'Carlo',
'Carlos', 'Carol', 'Carsten', 'Casimir', 'Caspar', 'Cecil', 'Ceddric',
'Cedric', 'Celestino', 'Charles', 'Charlie', 'Chico', 'Chip', 'Chris',
'Christian', 'Christoph', 'Christophe', 'Christopher', 'Christy',
'Chuck', 'Cian', 'Cillian', 'Clarence', 'Clark', 'Clas', 'Claude',
'Claudio', 'Claudius', 'Claus', 'Clayton', 'Clemens', 'Cliff',
'Clifford', 'Clint', 'Clinton', 'Cody', 'Colin', 'Collin', 'Conan',
'Connor', 'Conny', 'Conor', 'Conrad', 'Constantine', 'Cooper',
'Cordell', 'Cornelius', 'Corvinus', 'Cristobal', 'Curd', 'Curt',
'Curtis', 'Curtiz', 'Cyril', 'Cyrill', 'Cäsar', 'Damian', 'Damon',
'Dan', 'Daniel', 'Daniele', 'Danilo', 'Danny', 'Dario', 'Darius',
'Dave', 'David', 'Davide', 'Dawson', 'Dean', 'Demetrius', 'Denis',
'Deniz', 'Dennis', 'Derek', 'Desiderius', 'Detlef', 'Detlev', 'Dick',
'Diego', 'Dieter', 'Dimitrij', 'Dirk', 'Dolf', 'Domenico', 'Domingo',
'Dominic', 'Dominik', 'Dominikus', 'Dominique', 'Donald', 'Donatello',
'Donato', 'Donatus', 'Dorian', 'Douglas', 'Dragan', 'Duarte', 'Duncan',
'Dylan', 'Earnest', 'Earvin', 'Eike', 'Eleasar', 'Elia', 'Elian',
'Elias', 'Elijah', 'Ellison', 'Elmar', 'Elroy', 'Emanuel', 'Emanuele',
'Emil', 'Emile', 'Emilian', 'Emiliano', 'Emilio', 'Emmanuel', 'Endrik',
'Enrico', 'Enrique', 'Enzo', 'Ephraim', 'Erasmus', 'Eric', 'Erik',
'Ermanno', 'Ernest', 'Ernestin', 'Ernesto', 'Eros', 'Errol', 'Etienne',
'Eugen', 'Eugene', 'Eugenio', 'Eusebius', 'Everett', 'Ezra', 'Fabiano',
'Fabien', 'Fabio', 'Fabius', 'Fabrice', 'Fabricius', 'Fabrizio',
'Falco', 'Falk', 'Falko', 'Faruk', 'Faustus', 'Favian', 'Federico',
'Federigo', 'Fedor', 'Felice', 'Feliciano', 'Felicien', 'Felipe',
'Felix', 'Felton', 'Feodor', 'Ferdinand', 'Fergus', 'Fernand',
'Fernando', 'Ferrante', 'Ferris', 'Fidel', 'Fidelio', 'Fidelis',
'Fidelius', 'Filippo', 'Finan', 'Finn', 'Fiore', 'Fjodor', 'Flavian',
'Flemming', 'Fletcher', 'Flint', 'Florens', 'Florentin', 'Florian',
'Florin', 'Florus', 'Floyd', 'Forrest', 'Forrester', 'Forster',
'Foster', 'Fox', 'Francesco', 'Francis', 'Francisco', 'Franco',
'Francois', 'Franek', 'Frank', 'Frankie', 'Franklin', 'Franziskus',
'Frasier', 'Frayne', 'Fred', 'Freddy', 'Frederic', 'Frederick',
'Frederik', 'Freeman', 'Fremont', 'Fridericus', 'Fridolin', 'Friedel',
'Frye', 'Gabriel', 'Gaetan', 'Gaetano', 'Gallus', 'Garcia', 'Garfield',
'Garin', 'Garnier', 'Garrick', 'Garrison', 'Garron', 'Garry', 'Garson',
'Gaspar', 'Gaspard', 'Gaspare', 'Gaston', 'Gastonne', 'Gates',
'Gauthier', 'Gavin', 'Gene', 'Geoffrey', 'Geoffroy', 'Geordi', 'Georg',
'George', 'Georges', 'Gerald', 'Geraldo', 'Gerard', 'Geraud', 'Gerd',
'Gereon', 'Germain', 'German', 'Germano', 'Gernot', 'Gerold',
'Geronimo', 'Gerrit', 'Gerry', 'Gert', 'Gerulf', 'Gerwin', 'Giacomo',
'Gian', 'Giancarlo', 'Gianni', 'Gibson', 'Gideon', 'Gil', 'Gilbert',
'Gilberto', 'Gilles', 'Gillian', 'Gino', 'Gioacchino', 'Giorgio',
'Giovanni', 'Giraldo', 'Gisbert', 'Gitano', 'Giuliano', 'Giulio',
'Giuseppe', 'Giusto', 'Glen', 'Glenn', 'Goliath', 'Goran', 'Gordon',
'Gordy', 'Goswin', 'Graciano', 'Graham', 'Grayson', 'Greg', 'Gregg',
'Gregoire', 'Gregor', 'Gregory', 'Griffin', 'Grover', 'Gualtier',
'Gualtiero', 'Guglielmo', 'Guido', 'Guillaume', 'Guillermo', 'Gunnar',
'Gunter', 'Gunther', 'Gus', 'Gustavo', 'Gustl', 'Gutierre', 'Guy',
'Götz', 'Günter', 'Günther', 'Hajo', 'Hamilton', 'Hamlet', 'Hampton',
'Hanley', 'Hannes', 'Hans', 'Harald', 'Hardy', 'Harley', 'Harlow',
'Harold', 'Haroun', 'Harrison', 'Harry', 'Harvey', 'Hasso', 'Hauke',
'Havel', 'Hector', 'Heiko', 'Heiner', 'Heino', 'Hektor', 'Helge',
'Helmut', 'Helmuth', 'Hendrick', 'Hendrik', 'Hennes', 'Henning',
'Henri', 'Henrick', 'Henrik', 'Henry', 'Herald', 'Herbie', 'Hercules',
'Herold', 'Herwig', 'Hieronymus', 'Hilarius', 'Holger', 'Holm',
'Homer', 'Horace', 'Horatio', 'Horaz', 'Howard', 'Howie', 'Hugh',
'Hugo', 'Humphrey', 'Hunter', 'Ignatius', 'Ignaz', 'Ignazio', 'Igor',
'Ilian', 'Ilja', 'Immanuel', 'Ingo', 'Ingolf', 'Ingvar', 'Irenäus',
'Irvin', 'Irving', 'Irwin', 'Isaac', 'Isaak', 'Isai', 'Isaiah',
'Isidor', 'Istvan', 'Ivan', 'Ivo', 'Jackson', 'Jacky', 'Jacob',
'Jacques', 'Jacquin', 'Jadon', 'Jago', 'Jaime', 'Jake', 'Jakob',
'Jamal', 'James', 'Jan', 'Janis', 'Jannes', 'Jannik', 'Janning',
'Janos', 'Janosch', 'Jaques', 'Jared', 'Jarik', 'Jarl', 'Jarno',
'Jaro', 'Jaromir', 'Jarrett', 'Jascha', 'Jason', 'Jasper', 'Jay',
'Jean', 'Jeff', 'Jefferson', 'Jeffrey', 'Jendrick', 'Jens', 'Jered',
'Jeremiah', 'Jeremias', 'Jeremie', 'Jeremy', 'Jerold', 'Jerom',
'Jerome', 'Jerrick', 'Jerry', 'Jesaja', 'Jesko', 'Jesse', 'Jim',
'Jimmy', 'Jirko', 'Jo', 'Joakim', 'Joao', 'Joaquin', 'Joe', 'Joel',
'Joey', 'John', 'Johnny', 'Jokim', 'Jonah', 'Jonas', 'Jonathan',
'Jonny', 'Jordan', 'Jordano', 'Jorge', 'Jose', 'Josef', 'Joseph',
'Josh', 'Joshua', 'Josias', 'Jost', 'Josua', 'Josue', 'Jourdain',
'Juan', 'Juanito', 'Jud', 'Jules', 'Julien', 'Julio', 'Julius',
'Jurij', 'Justin', 'Justinian', 'Justus', 'Jörg', 'Jürgen', 'Kain',
'Kaj', 'Kajetan', 'Kallistus', 'Karsten', 'Kasimir', 'Kaspar',
'Keamon', 'Keith', 'Ken', 'Kenan', 'Kenneth', 'Keno', 'Kersten',
'Kerwin', 'Kevin', 'Kian', 'Kilian', 'Kim', 'Kiran', 'Klaas', 'Klaus',
'Klemens', 'Kleopas', 'Knud', 'Knut', 'Kolja', 'Konrad', 'Konstantin',
'Korbin', 'Korbinian', 'Kordt', 'Kristian', 'Kristof', 'Kristoffer',
'Kuno', 'Kurt', 'Kyros', 'LLoyd', 'Lajos', 'Lambert', 'Lamberto',
'Larry', 'Lars', 'Laslo', 'Lasse', 'Laurent', 'Laurente', 'Laurentius',
'Laurenz', 'Laurenzo', 'Lawrence', 'Lazarus', 'Lazlo', 'Leander',
'Lee', 'Leif', 'Leigh', 'Lennart', 'Lenny', 'Lenz', 'Leo', 'Leon',
'Leonard', 'Leonardo', 'Leonce', 'Leone', 'Leonello', 'Leonhard',
'Leopold', 'Leopoldo', 'Leroy', 'Lesley', 'Lester', 'Leverett', 'Levi',
'Lew', 'Lewis', 'Lex', 'Liborius', 'Lienhard', 'Linus', 'Lion',
'Lionel', 'Lobo', 'Loic', 'Lorenz', 'Lorenzo', 'Loris', 'Lothaire',
'Lou', 'Louie', 'Louis', 'Lovis', 'Luc', 'Luca', 'Lucan', 'Lucas',
'Luciano', 'Lucien', 'Lucius', 'Ludovico', 'Ludwig', 'Luigi', 'Luis',
'Lukas', 'Luke', 'Lutger', 'Luther', 'Lutz', 'Lyonel', 'Maik', 'Malte',
'Malwin', 'Manolito', 'Manolo', 'Manuel', 'Marc', 'Marcel', 'Marcello',
'Marcellus', 'Marco', 'Marcus', 'Marek', 'Marian', 'Marin', 'Marino',
'Marinus', 'Mario', 'Marius', 'Mark', 'Markus', 'Marlon', 'Maro',
'Marten', 'Martin', 'Marvin', 'Massimo', 'Mathias', 'Mathieu',
'Mathis', 'Matt', 'Matteo', 'Matthes', 'Matthew', 'Matthias',
'Matthieu', 'Matthäus', 'Maurice', 'Mauritius', 'Mauritz', 'Maurizio',
'Mauro', 'Maurus', 'Max', 'Maxence', 'Maxi', 'Maxime', 'Maximilian',
'Maximilien', 'Melchior', 'Merlin', 'Michael', 'Michail', 'Michel',
'Michele', 'Mick', 'Mickey', 'Miguel', 'Mika', 'Mikael', 'Mike',
'Mikel', 'Miklos', 'Milan', 'Milo', 'Mirko', 'Miro', 'Miroslav',
'Mischa', 'Mitja', 'Morgan', 'Moritz', 'Morris', 'Morten', 'Nat',
'Nathan', 'Nathanael', 'Nathaniel', 'Nepomuk', 'Nero', 'Neron',
'Newton', 'Niccolo', 'Nicholas', 'Nick', 'Nicki', 'Nico', 'Nicola',
'Nicolai', 'Nicolaj', 'Nicolas', 'Niels', 'Nigel', 'Nikita', 'Niklas',
'Niklaus', 'Niko', 'Nikodemus', 'Nikolai', 'Nikolaus', 'Nils', 'Noah',
'Noel', 'Norbert', 'Norberto', 'Norman', 'Odin', 'Odo', 'Odysseus',
'Olaf', 'Oleg', 'Oliver', 'Olivier', 'Oliviero', 'Olof', 'Oluf',
'Omar', 'Omer', 'Orlando', 'Orson', 'Oskar', 'Osvaldo', 'Oswin',
'Otello', 'Othello', 'Otto', 'Ove', 'Owain', 'Owen', 'Paco', 'Paddy',
'Palmiro', 'Pancho', 'Paolo', 'Pascal', 'Pat', 'Patrice', 'Patricio',
'Patricius', 'Patrick', 'Patrizio', 'Patrizius', 'Paul', 'Paulin',
'Paulus', 'Pawel', 'Pedro', 'Peer', 'Pepe', 'Pepito', 'Peppone', 'Per',
'Percy', 'Perez', 'Pete', 'Peter', 'Phil', 'Philip', 'Philipp',
'Philippe', 'Philo', 'Piedro', 'Pier', 'Piero', 'Pierre', 'Piet',
'Pieter', 'Pietro', 'Pinkus', 'Pippin', 'Pitt', 'Pius', 'Placide',
'Placido', 'Placidus', 'Poldi', 'Quint', 'Quintin', 'Quintinus',
'Quintus', 'Quirin', 'Quirino', 'Raffaele', 'Raffaello', 'Raffaelo',
'Raimondo', 'Raimund', 'Raimundo', 'Rainer', 'Rainier', 'Ralf',
'Ralph', 'Ramon', 'Randolf', 'Randolph', 'Randy', 'Raoul', 'Raphael',
'Rasmus', 'Rasul', 'Raul', 'Ray', 'Raymond', 'Regnier', 'Reik',
'Reiner', 'Remo', 'Renato', 'Renatus', 'Renaud', 'Rene', 'Renja',
'Reto', 'Reynold', 'Ricardo', 'Riccardo', 'Rick', 'Ricky', 'Rico',
'Rinaldo', 'Robby', 'Robert', 'Roberto', 'Robin', 'Rocco', 'Rock',
'Rocky', 'Rod', 'Rodolfo', 'Rodolphe', 'Rodrigo', 'Rodrigue',
'Rodrique', 'Roger', 'Roland', 'Rolando', 'Rolf', 'Romain', 'Roman',
'Romano', 'Romeo', 'Romero', 'Ronald', 'Ronan', 'Ronny', 'Rory',
'Ross', 'Rowan', 'Rowland', 'Roy', 'Ruben', 'Rudolf', 'Rudolph',
'Ruggero', 'Rupert', 'Ryan', 'Salomon', 'Salomone', 'Salvador',
'Salvator', 'Salvatore', 'Sam', 'Sammy', 'Samuel', 'Samuele', 'Sander',
'Sandor', 'Sandro', 'Sandy', 'Sascha', 'Sauveur', 'Schorsch', 'Scipio',
'Scott', 'Sean', 'Sebastian', 'Sebastiano', 'Sebastien', 'Selim',
'Semjon', 'Sepp', 'Serenus', 'Serge', 'Sergej', 'Sergio', 'Sergius',
'Servatius', 'Severiano', 'Severin', 'Severo', 'Sidney', 'Sidonius',
'Silas', 'Silvain', 'Silvan', 'Silvano', 'Silvanus', 'Silverio',
'Silverius', 'Silvester', 'Silvestro', 'Silvio', 'Silvius', 'Simjon',
'Simon', 'Simone', 'Sinclair', 'Sixt', 'Sixtus', 'Slade', 'Solomon',
'Spencer', 'Stan', 'Stanislaus', 'Stanislaw', 'Stanley', 'Stefan',
'Stefano', 'Steffen', 'Sten', 'Stephan', 'Stephen', 'Steve', 'Steven',
'Stewart', 'Stig', 'Stuart', 'Sven', 'Sylvain', 'Sylvester', 'Söncke',
'Sören', 'Tam', 'Tarek', 'Tassilo', 'Tasso', 'Ted', 'Teddy',
'Teobaldo', 'Thaddäus', 'Theo', 'Theodor', 'Theodore', 'Thierry',
'Thimotheus', 'Thomas', 'Thommy', 'Thoralf', 'Thorben', 'Thore',
'Thorsten', 'Tiberio', 'Tiberius', 'Tibor', 'Till', 'Tim', 'Timmy',
'Timo', 'Timofej', 'Timon', 'Timoteo', 'Timothee', 'Timotheus',
'Timothy', 'Tin', 'Tito', 'Titus', 'Tizian', 'Tiziano', 'Tjade',
'Tjark', 'Tobi', 'Tobia', 'Tobiah', 'Tobias', 'Tobie', 'Tobis', 'Toby',
'Tom', 'Tommaso', 'Tommy', 'Toni', 'Tonio', 'Tony', 'Torben', 'Torin',
'Torsten', 'Tristan', 'Tycho', 'Tyler', 'Tyson', 'Udo', 'Ugo',
'Ugolino', 'Ulf', 'Uli', 'Ulli', 'Ulric', 'Ulrich', 'Ulrico',
'Umberto', 'Urbain', 'Urban', 'Urbano', 'Urias', 'Uriel', 'Ursus',
'Uwe', 'Valentiano', 'Valentin', 'Valentino', 'Valerian', 'Valerio',
'Valerius', 'Valery', 'Vasco', 'Veit', 'Veltin', 'Vernon', 'Vicente',
'Vico', 'Victor', 'Viktor', 'Vincent', 'Vincenzo', 'Vinzenez',
'Vinzenz', 'Virgil', 'Vitalis', 'Vito', 'Vittore', 'Vittoriano',
'Vittorio', 'Volker', 'Wallace', 'Walt', 'Warner', 'Warren', 'Wido',
'Wigand', 'Wilbur', 'Willi', 'William', 'Wilpert', 'Winston', 'Wolf',
'Wolfgang', 'Woodrow', 'Woody', 'Xaver',
)
first_names_female = (
'Abby', 'Abelina', 'Abigail', 'Adelaide', 'Adeline', 'Adina',
'Adriana', 'Adrienne', 'Afra', 'Agatha', 'Agnes', 'Aida', 'Aimee',
'Aischa', 'Albertine', 'Alea', 'Aleksandra', 'Alena', 'Alessa',
'Alessandra', 'Alessia', 'Alexa', 'Alexandra', 'Alexia', 'Alexis',
'Alice', 'Alicia', 'Alida', 'Alina', | |
#!/usr/bin/env python
import sys, math, random, signal, time
# Wall-clock start timestamp — presumably for timing the search; not read in
# this visible chunk (TODO confirm against the rest of the file).
ttc = time.time()
# Global progress counter, reported by the SIGTSTP handler registered below.
counter = 0
def signal_handler(signum, frame):
    """Print the global progress counter when a signal is delivered.

    Registered for SIGTSTP below, so pressing Ctrl+Z reports progress.
    The parameters were renamed from ``signal, frame``: the old first
    parameter shadowed the ``signal`` module (the handler is always invoked
    positionally by the interpreter, so the rename is caller-safe).
    """
    # Parentheses make this valid under both Python 2 (paren'd expression)
    # and Python 3 (function call); the bare `print counter` statement was
    # Python-2-only.
    print(counter)
signal.signal(signal.SIGTSTP, signal_handler)
# AES forward S-box (SubBytes): the standard FIPS-197 substitution table,
# indexed by byte value.
sbox = (
    0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
    0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
    0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
    0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
    0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
    0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
    0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
    0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
    0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
    0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
    0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
    0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
    0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
    0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
    0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
    0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
)


def _xtime(b):
    # Multiply b by x (i.e. by 2) in GF(2^8) modulo the AES reduction
    # polynomial x^8 + x^4 + x^3 + x + 1 (0x11B).
    return ((b << 1) ^ 0x1B) & 0xFF if b & 0x80 else (b << 1)


# Derived tables: sbox_mul2[i] == 2 * sbox[i] and sbox_mul3[i] == 3 * sbox[i]
# in GF(2^8).  Computing them from `sbox` replaces 512 hand-typed hex literals
# with values that are correct by construction (spot-checked identical to the
# previous literals).
sbox_mul2 = tuple(_xtime(s) for s in sbox)
sbox_mul3 = tuple(s ^ _xtime(s) for s in sbox)

# Inverse S-box, derived so that sboxInv[sbox[i]] == i for every byte i
# (again identical to the previous hand-typed table).
_inv = [0] * 256
for _i, _s in enumerate(sbox):
    _inv[_s] = _i
sboxInv = tuple(_inv)
def get_T_list(set_number):
    """Return the half-open index range of S-box entries covered by a cache set.

    The observed set numbers 0-63 fall into four identical banks of 16 lines
    (0-15, 16-31, 32-47, 48-63); within a bank, line k covers the 16 S-box
    entries [k * 16, k * 16 + 16). The original implementation spelled out
    the four banks as four identical branches; they collapse to modular
    arithmetic.

    Args:
        set_number: integer cache set/line number in [0, 63].

    Returns:
        (x, y): half-open index range into the 256-entry S-box table.

    Raises:
        ValueError: if set_number is outside [0, 63]. (The original fell
        through and implicitly returned None, which crashed the caller's
        ``x, y = get_T_list(...)`` unpacking with a less helpful TypeError.)
    """
    if not 0 <= set_number <= 63:
        raise ValueError("set_number must be in [0, 63], got %r" % (set_number,))
    x = (set_number % 16) * 16
    return x, x + 16
# Known-good round-10 key bytes, used to pin individual bytes while debugging
# the recovery (see the commented-out "FIX SOME KEYS" lines below).
key_round_10_valid = [111, 191, 99, 248, 253, 118, 140, 250, 35, 50, 175, 52, 148, 110, 88, 145]
# Trace file layout per measurement: plaintext line, ciphertext line,
# cache-trace line, then two separator lines.
# NOTE(review): eval() on file contents is unsafe on untrusted input --
# ast.literal_eval would be the safe equivalent. TODO confirm the trace file
# is always locally generated before leaving eval in place.
f = open('data/last_round.txt', 'r')
plain_t = f.readline()
measure = []
while plain_t:
    cipher_t = f.readline().strip()
    pin_trace = f.readline().strip()
    measure.append({"p": eval(plain_t), "c": eval(cipher_t), "pin": eval(pin_trace)})
    f.readline()  # skip separator line
    f.readline()  # skip separator line
    plain_t = f.readline()
random.shuffle(measure)
key_round_10 = []
# The last 16 trace entries correspond to the 16 S-box lookups of round 10.
last_round_trace = measure[0]['pin'][-16:]
for i in xrange(16):
    byte = {}
    x, y = get_T_list(last_round_trace[i])
    # Every S-box entry in the observed cache line is a key-byte candidate:
    # candidate key byte = ciphertext byte XOR sbox[j].
    for j in xrange(x, y):
        byte.update({ord(measure[0]['c'][i]) ^ sbox[j] : 0})
    key_round_10.append(byte)
# Trace entries [-32:-16] belong to the previous (9th) round's lookups.
last_2_round_trace = measure[0]['pin'][-32:-16]
c_text = measure[0]['c']
#FIX SOME KEYS
#key_round_10[11] = [key_round_10_valid[11]]
#key_round_10[15] = [key_round_10_valid[15]]
#key_round_10[6] = [key_round_10_valid[6]]
'''
Group 2 Eq -----------------------------------------------------------------------
'''
x4_x5_x6_x7 = pow(2, 16) * [None]
counter = 0
x1, y1 = get_T_list(last_2_round_trace[4])
x2, y2 = get_T_list(last_2_round_trace[5])
x3, y3 = get_T_list(last_2_round_trace[6])
x4, y4 = get_T_list(last_2_round_trace[7])
for i1 in xrange(x1, y1):
for i2 in xrange(x2, y2):
for i3 in xrange(x3, y3):
for i4 in xrange(x4, y4):
x4_x5_x6_x7[counter] = ''.join(map(chr, (i1, i2, i3, | |
# repo: dr1315/Collocation_v2 (gh_stars: 0) -- scrape artifact converted to a comment
import os
import sys
import numpy as np
import pandas as pd
from pysolar.solar import get_altitude_fast
from pyorbital.orbital import get_observer_look
from pyorbital.astronomy import get_alt_az
import datetime as dt
from datetime import timezone
sys.path.append("/g/data/k10/dr1709/code/Personal/Tools")
import him8analysis as h8a
import collocation as col
### For NN Training and Validation ###
def normalise_data(dataframe):
    """Normalise collocated Himawari-8 observations for NN input.

    Reflectance bands (1-6) are scaled as (x - 50) / 100 and thermal bands
    (7-16) as (x - 273.15) / 70; latitude/longitude are mapped into [0, 1],
    scene date/time into fractions of a year/day, and the two zenith angles
    into roughly [-1, 1].

    Args:
        dataframe: pandas DataFrame holding the collocation columns named
            below ('Himawari Band N ... at 2km Resolution', scene times, etc.).

    Returns:
        dict mapping input names to numpy arrays of normalised values.
        NOTE(review): 'Date' and 'Time' arrays come out with shape (n, 1)
        while the others are flat (n,) -- downstream reshaping makes this
        consistent; confirm if used elsewhere.
    """
    # Per-input [offset, scale] pairs; the trailing comments record the
    # observed maxima that informed the chosen scales.
    norm_vals = {'1': [50., 100.], # 120.,
                 '2': [50., 100.], # 135.,
                 '3': [50., 100.], # 165.,
                 '4': [50., 100.], # 205.,
                 '5': [50., 100.], # 121.,
                 '6': [50., 100.], # 132.,
                 '7': [273.15, 70.], # 401.,
                 '8': [273.15, 70.], # 317.,
                 '9': [273.15, 70.], # 327.,
                 '10': [273.15, 70.], # 327.,
                 '11': [273.15, 70.], # 344.,
                 '12': [273.15, 70.], # 328.,
                 '13': [273.15, 70.], # 371.,
                 '14': [273.15, 70.], # 348.,
                 '15': [273.15, 70.], # 403.,
                 '16': [273.15, 70.], # 410.,
                 'LAT': [90., 180.],
                 'LON': [180., 360.],
                 'DATE': 366.,
                 'TIME': (24. * 3600 + 1.),
                 'ANGLES': 360.,
                 'SZA': 90.,
                 'OZA': 45.}
    normalised_data = {}
    ### Normalise Band Inputs ###
    # Bands 1-4 carry sub-pixel Mean/Sigma pairs; bands 5-16 a single Value.
    for band_number in range(1, 16+1):
        if band_number <= 4:
            for value_type in ['Mean', 'Sigma']:
                key = 'Himawari Band %s %s at 2km Resolution' % (str(band_number), value_type)
                norm_data = (dataframe[key] - norm_vals[str(band_number)][0]) / norm_vals[str(band_number)][1]
                normalised_data[key] = np.array(list(norm_data))
        else:
            key = 'Himawari Band %s %s at 2km Resolution' % (str(band_number), 'Value')
            norm_data = (dataframe[key] - norm_vals[str(band_number)][0]) / norm_vals[str(band_number)][1]
            normalised_data[key] = np.array(list(norm_data))
    ### Normalise Latitudes ###
    # (lat + 90) / 180 -> [0, 1]
    norm_lats = (dataframe['Himawari Latitude'] + norm_vals['LAT'][0]) / norm_vals['LAT'][-1]
    norm_lats = np.array(list(norm_lats))
    normalised_data['Latitude'] = norm_lats
    ### Normalise Longitudes ###
    # (lon + 180) / 360 -> [0, 1]
    norm_lons = (dataframe['Himawari Longitude'] + norm_vals['LON'][0]) / norm_vals['LON'][-1]
    norm_lons = np.array(list(norm_lons))
    normalised_data['Longitude'] = norm_lons
    ### Normalise Date and Time Inputs ###
    # Date #
    # Mid-point of the scene's start/end times, as day-of-year / 366.
    dtime_start = dataframe['Himawari Scene Start Time']
    dtime_delta = dataframe['Himawari Scene End Time'] - dtime_start
    dtime_avg = dtime_start + dtime_delta / 2
    ydays = []
    for d in dtime_avg:
        yday = d.timetuple().tm_yday
        ydays.append([yday])
    ydays = np.array(ydays)
    ydays = ydays / norm_vals['DATE']
    normalised_data['Date'] = ydays
    # Time #
    # Seconds since midnight of the scene mid-point, as a fraction of a day.
    dsecs = []
    for t in dtime_avg:
        dsec = (t.hour * 3600) + (t.minute * 60) + (t.second)
        dsecs.append([dsec])
    dsecs = np.array(dsecs)
    dsecs = dsecs / norm_vals['TIME']
    normalised_data['Time'] = dsecs
    # Only the two zenith angles are used; the other angle inputs below were
    # deliberately retired (kept commented for reference).
    angles = ['Himawari Solar Zenith Angle', # -90 /90
              # 'Himawari Solar Azimuth Angle',
              # 'Himawari Observer Elevation Angle',
              'Himawari Observer Zenith Angle', # -45 /45
              # 'Himawari Observer Azimuth Angle'
              ]
    angle = np.array(list(dataframe['Himawari Solar Zenith Angle'])) - norm_vals['SZA']
    normalised_data['Himawari Solar Zenith Angle'] = angle / norm_vals['SZA']
    angle = np.array(list(dataframe['Himawari Observer Zenith Angle'])) - norm_vals['OZA']
    normalised_data['Himawari Observer Zenith Angle'] = angle / norm_vals['OZA']
    # for angle in angles:
    #     normalised_data[angle] = np.array(list(dataframe[angle])) / norm_vals['ANGLES']
    #     if angle == 'Himawari Solar Azimuth Angle':
    #         normalised_data[angle] = (np.array(list(dataframe[angle])) + 180.) / norm_vals['ANGLES']
    #     else:
    #         normalised_data[angle] = np.array(list(dataframe[angle])) / norm_vals['ANGLES']
    # print(normalised_data)
    return normalised_data
def format_inputs(normalised_data):
    """Stack the normalised feature arrays into a single (n, 22) input matrix.

    Each selected feature array is reshaped to a column vector (in place, in
    normalised_data) and the columns are horizontally stacked in a fixed
    order: band 1-4 mean/sigma pairs, band 5-16 values, then the solar and
    observer zenith angles.
    """
    feature_keys = []
    for band in range(1, 5):
        feature_keys.append('Himawari Band %d Mean at 2km Resolution' % band)
        feature_keys.append('Himawari Band %d Sigma at 2km Resolution' % band)
    for band in range(5, 17):
        feature_keys.append('Himawari Band %d Value at 2km Resolution' % band)
    feature_keys.append('Himawari Solar Zenith Angle')
    feature_keys.append('Himawari Observer Zenith Angle')
    for key in feature_keys:
        column = normalised_data[key]
        normalised_data[key] = column.reshape(len(column), 1)
    return np.hstack(tuple(normalised_data[key] for key in feature_keys))
def process_auxiliaries(dataframe):
    """Collect per-pixel auxiliary quantities for model evaluation.

    Returns arrays aligned with the dataframe rows:
    (summed 532nm ODs, IGBP surface type of the last CALIOP shot, regressed
    cloud-top heights, Himawari latitudes, solar zenith angles, observer
    zenith angles, Himawari/CALIOP ground separations in km).
    """
    print('Adding Auxiliary Information')
    od_profiles = list(dataframe['CALIOP ODs for 532nm'])
    od_totals = []
    for profile in od_profiles:
        # -9999. entries (fill values, presumably) are zeroed in place so
        # they do not poison the column sum.
        profile[profile == -9999.] = 0.
        od_totals.append(sum(profile))
    od_totals = np.array(od_totals)
    surface_types = np.array([types[-1] for types in list(dataframe['CALIOP IGBP Surface Types'])])
    # Convert the regression classifier output into height (scale/offset as
    # fixed by the training target encoding).
    top_heights = (basic_height_regression_classifiers(dataframe) * 30.6) - 0.5
    top_heights = top_heights.flatten()
    latitudes = np.array(list(dataframe['Himawari Latitude']))
    solar_zeniths = np.array(list(dataframe['Himawari Solar Zenith Angle']))
    observer_zeniths = np.array(list(dataframe['Himawari Observer Zenith Angle']))
    separations = get_spatial_diff(dataframe)
    print('Done')
    return od_totals, surface_types, top_heights, latitudes, solar_zeniths, observer_zeniths, separations
def get_sza(dataframe):
    """Compute one solar zenith angle per collocation, evaluated at the
    optically thickest CALIOP shot of each pixel.

    Uses pysolar's get_altitude_fast (SZA = 90 - solar altitude); the shot
    time is forced to UTC before the ephemeris call.
    """
    od_profiles = list(dataframe['CALIOP ODs for 532nm'])
    scan_times = list(dataframe['CALIOP Pixel Scan Times'])
    latitudes = list(dataframe['CALIOP Latitudes'])
    longitudes = list(dataframe['CALIOP Longitudes'])
    total = len(od_profiles)
    SZAs = []
    print('Calculating Solar Zenith Angles')
    for count, (ods, times, lats, lons) in enumerate(zip(od_profiles, scan_times, latitudes, longitudes), start=1):
        print('Calculating Angle %d/%d ' % (count, total), end='\r')
        thickest = np.argmax(ods)
        when = times[thickest].replace(tzinfo=dt.timezone.utc)
        altitude = get_altitude_fast(lats[thickest], lons[thickest], when)
        SZAs.append(90. - altitude)
    print('All %d Solar Zenith Angles Calculated ' % total)
    return np.array(SZAs)
def get_spatial_diff(dataframe):
    """Great-circle separation (km) between each Himawari pixel centre and
    the last collocated CALIOP shot, via the haversine formula on a sphere
    of radius 6372.8 km.

    Longitudes are first re-centred on the Himawari-8 sub-satellite
    longitude (140.7 E) and wrapped into (-180, 180].
    """
    def _recentre(lons):
        # Shift into the 140.7E-centred frame and wrap into (-180, 180].
        shifted = lons - 140.7
        shifted[shifted <= -180.] += 360.
        shifted[shifted > 180.] -= 360.
        return shifted

    him_lat = np.deg2rad(np.array(list(dataframe['Himawari Latitude'])))
    him_lon = np.deg2rad(_recentre(np.array(list(dataframe['Himawari Longitude']))))
    cal_lat = np.deg2rad(np.array([shots[-1] for shots in list(dataframe['CALIOP Latitudes'])]))
    cal_lon = np.deg2rad(_recentre(np.array([shots[-1] for shots in list(dataframe['CALIOP Longitudes'])])))
    half_dlat = (him_lat - cal_lat) / 2
    half_dlon = (him_lon - cal_lon) / 2
    haversine = np.sin(half_dlat) ** 2 + np.cos(him_lat) * np.cos(cal_lat) * np.sin(half_dlon) ** 2
    return 6372.8 * (2 * np.arcsin(np.sqrt(haversine)))
def basic_binary_classifiers(dataframe):
    """One-hot binary labels per pixel from the CALIOP vertical feature mask.

    [1, 0] -> the mask contains a clear-air (0) entry; [0, 1] -> it does
    not. Prints the class counts before returning the (n, 2) float array.
    """
    labels = []
    for mask in list(dataframe['CALIOP Vertical Feature Mask']):
        if 0 in mask:
            labels.append(np.array([1., 0.]))
        else:
            labels.append(np.array([0., 1.]))
    labels = np.array(labels)
    print("# of 0's: %d" % np.sum(np.all(labels == np.array([1., 0.]), axis=1)))
    print("# of 1's: %d" % np.sum(np.all(labels == np.array([0., 1.]), axis=1)))
    return np.array(labels)
def basic_binary_cloud_classifiers(dataframe):
    """One-hot cloud/no-cloud labels from the CALIOP vertical feature mask.

    Feature codes 1-8 are cloud objects: [0, 1] -> at least one cloud code
    present in the pixel, [1, 0] -> none. Prints the class counts before
    returning the (n, 2) float array.
    """
    labels = []
    for mask in list(dataframe['CALIOP Vertical Feature Mask']):
        has_cloud = any(code in range(1, 8 + 1) for code in mask)
        labels.append(np.array([0., 1.]) if has_cloud else np.array([1., 0.]))
    labels = np.array(labels)
    print("# of 0's: %d" % np.sum(np.all(labels == np.array([1., 0.]), axis=1)))
    print("# of 1's: %d" % np.sum(np.all(labels == np.array([0., 1.]), axis=1)))
    return np.array(labels)
def high_OD_binary_classifiers(dataframe):
    """One-hot labels splitting pixels by total 532nm optical depth.

    [1, 0] -> the summed OD over the pixel's CALIOP profile exceeds 0.3;
    [0, 1] -> otherwise. Prints the class counts before returning the
    (n, 2) float array.
    """
    features = list(dataframe['CALIOP ODs for 532nm'])
    bin_features = []
    for item in features:
        if sum(item) > 0.3:
            bin_features.append(np.array([1., 0.]))
        else:
            bin_features.append(np.array([0., 1.]))
    bin_features = np.array(bin_features)
    # A stray debug print of the raw array/shape was removed here; only the
    # count summary (matching the sibling *_classifiers helpers) remains.
    print("# of 0's: %d" % np.sum(np.all(bin_features == np.array([1., 0.]), axis=1)))
    print("# of 1's: %d" % np.sum(np.all(bin_features == np.array([0., 1.]), axis=1)))
    return np.array(bin_features)
def clear_cloud_aerosol_mixed_classifiers(dataframe):
    """
    Four-way one-hot labels from the CALIOP vertical feature mask.

    [1,0,0,0] -> clear
    [0,1,0,0] -> cloud only (feature codes 1-8)
    [0,0,1,0] -> aerosol only (feature codes > 8)
    [0,0,0,1] -> mixed cloud and aerosol

    Prints the per-class counts before returning the (n, 4) float array.
    """
    bin_features = []
    for mask in list(dataframe['CALIOP Vertical Feature Mask']):
        has_cloud = any(code in range(1, 8 + 1) for code in mask)
        has_aerosol = any(code > 8 for code in mask)
        if has_cloud and has_aerosol:
            bin_features.append(np.array([0., 0., 0., 1.]))
        elif has_cloud:
            bin_features.append(np.array([0., 1., 0., 0.]))
        elif has_aerosol:
            bin_features.append(np.array([0., 0., 1., 0.]))
        else:
            bin_features.append(np.array([1., 0., 0., 0.]))
    bin_features = np.array(bin_features)
    print("# of Clear: %d" % np.sum(np.all(bin_features == np.array([1., 0., 0., 0.]), axis=1)))
    print("# of Cloud Only: %d" % np.sum(np.all(bin_features == np.array([0., 1., 0., 0.]), axis=1)))
    print("# of Aerosol Only: %d" % np.sum(np.all(bin_features == np.array([0., 0., 1., 0.]), axis=1)))
    print("# of Mixed: %d" % np.sum(np.all(bin_features == np.array([0., 0., 0., 1.]), axis=1)))
    return np.array(bin_features)
def complex_classifiers(dataframe):
"""
[0] -> Clear
[1] -> Cloud only
[2] -> Aerosol only
[3] -> Cloud over Aerosol (change to 1 OD over n OD and >0.1km difference in height)
[4] -> Aerosol over Cloud (change to 1 OD over n OD and >0.1km difference in height)
[5] -> Fully Mixed (overlap in layers)
Future: Add thin over thin, thin over thick (and thick over thin, thick over thick?)
:param dataframe:
:return:
"""
features = | |
elif in_modified and not in_original:
self.add_cell_info(add_label = edit_value, frame = frame)
comparison = np.where(annotated != self.tracked[frame])
self.frames_changed = np.any(comparison)
self.tracked[frame] = annotated
def action_flood_contiguous(self, label, frame, x_location, y_location):
    '''
    flood fill a cell with a unique new label; alternative to watershed
    for fixing duplicate label issue if cells are not touching

    The click location arrives in display coordinates and is divided by
    self.scale_factor to index the annotation array. The connected region
    under the click is rewritten to a brand-new label (max(self.tracks) + 1)
    and the cell-info dicts are updated to match.
    '''
    img_ann = self.tracked[frame,:,:,0]
    old_label = label
    new_label = max(self.tracks) + 1
    # was the old label present anywhere in this frame before the fill?
    in_original = np.any(np.isin(img_ann, old_label))
    filled_img_ann = flood_fill(img_ann, (int(y_location/self.scale_factor), int(x_location/self.scale_factor)), new_label)
    self.tracked[frame,:,:,0] = filled_img_ann
    # are any pixels of the old label left after the fill?
    in_modified = np.any(np.isin(filled_img_ann, old_label))
    # update cell info dicts since labels are changing
    self.add_cell_info(add_label=new_label, frame = frame)
    if in_original and not in_modified:
        # the flood consumed every pixel of the old label in this frame
        self.del_cell_info(del_label = old_label, frame = frame)
def action_trim_pixels(self, label, frame, x_location, y_location):
    '''
    get rid of any stray pixels of selected label; pixels of value label
    that are not connected to the cell selected will be removed from annotation in that frame
    '''
    annotation = self.tracked[frame,:,:,0]
    seed = (int(y_location/self.scale_factor), int(x_location/self.scale_factor))
    # boolean mask of the connected component under the click
    selected_region = flood(image = annotation, seed_point = seed)
    stray_pixels = np.logical_and(np.invert(selected_region), annotation == label)
    trimmed = np.where(stray_pixels, 0, annotation)
    # flag a redraw only when the trim actually changed something
    self.frames_changed = np.any(np.where(trimmed != annotation))
    self.tracked[frame,:,:,0] = trimmed
def action_fill_hole(self, label, frame, x_location, y_location):
    '''
    fill a "hole" in a cell annotation with the cell label. Doesn't check
    if annotation at (y,x) is zero (hole to fill) because that logic is handled in
    javascript. Just takes the click location, scales it to match the actual annotation
    size, then fills the hole with label (using skimage flood_fill). connectivity = 1
    prevents hole fill from spilling out into background in some cases
    '''
    # translate display coordinates into annotation-array coordinates
    seed = (y_location // self.scale_factor, x_location // self.scale_factor)
    annotation = self.tracked[frame,:,:,0]
    self.tracked[frame,:,:,0] = flood_fill(annotation, seed, label, connectivity = 1)
    self.frames_changed = True
def action_new_single_cell(self, label, frame):
    """
    Create new label in just one frame
    """
    replacement = max(self.tracks) + 1
    frame_data = self.tracked[frame]
    # rewrite every pixel of the old label in this frame only
    self.tracked[frame] = np.where(frame_data == label, replacement, frame_data)
    # keep the lineage bookkeeping in sync with the pixel change
    self.del_cell_info(del_label = label, frame = frame)
    self.add_cell_info(add_label = replacement, frame = frame)
def action_new_track(self, label, frame):
    """
    Replacing label - create in all subsequent frames

    Splits a track at `frame`: pixels of `label` from `frame` onwards are
    rewritten to a brand-new label, and the lineage record is divided so the
    old track keeps the earlier frames and the new track the later ones.
    A no-op when frame == 0 (the whole track would simply be renamed).
    """
    old_label, start_frame = label, frame
    new_label = max(self.tracks) + 1
    if start_frame != 0:
        # replace frame labels
        for frame in self.tracked[start_frame:]:
            frame[frame == old_label] = new_label
        # replace fields
        track_old = self.tracks[old_label]
        track_new = self.tracks[new_label] = {}
        # split the frame list at the first frame of the new track
        idx = track_old["frames"].index(start_frame)
        frames_before = track_old["frames"][:idx]
        frames_after = track_old["frames"][idx:]
        track_old["frames"] = frames_before
        track_new["frames"] = frames_after
        track_new["label"] = new_label
        # only add daughters if they aren't in the same frame as the new track
        track_new["daughters"] = []
        for d in track_old["daughters"]:
            if start_frame not in self.tracks[d]["frames"]:
                track_new["daughters"].append(d)
        track_new["frame_div"] = track_old["frame_div"]
        track_new["capped"] = track_old["capped"]
        track_new["parent"] = None
        # the old track ends here: no daughters, no division, capped
        track_old["daughters"] = []
        track_old["frame_div"] = None
        track_old["capped"] = True
        self.frames_changed = self.info_changed = True
def action_delete(self, label, frame):
    """
    Deletes label from current frame only
    """
    frame_data = self.tracked[frame]
    # zero out every pixel carrying this label in the frame
    frame_data = np.where(frame_data == label, 0, frame_data)
    self.tracked[frame] = frame_data
    self.del_cell_info(del_label = label, frame = frame)
def action_set_parent(self, label_1, label_2):
    """
    label_1 gave birth to label_2

    Links the two lineage records only when the parent's last frame precedes
    the daughter's first frame; the parent's division frame becomes the
    earliest first-frame among its daughters.
    """
    track_1 = self.tracks[label_1]
    track_2 = self.tracks[label_2]
    last_frame_parent = max(track_1['frames'])
    first_frame_daughter = min(track_2['frames'])
    # a parent must disappear before its daughter appears
    if last_frame_parent < first_frame_daughter:
        track_1["daughters"].append(label_2)
        # de-duplicate in case the link already existed
        daughters = np.unique(track_1["daughters"]).tolist()
        track_1["daughters"] = daughters
        track_2["parent"] = label_1
        if track_1["frame_div"] is None:
            track_1["frame_div"] = first_frame_daughter
        else:
            track_1["frame_div"] = min(track_1["frame_div"], first_frame_daughter)
        self.info_changed = True
def action_replace(self, label_1, label_2):
    """
    Replacing label_2 with label_1

    Rewrites label_2's pixels to label_1 in every frame, merges label_2's
    lineage record into label_1's (label_1's old daughters are orphaned and
    label_2's daughters/division/capped state are inherited), then removes
    every remaining reference to label_2.
    """
    # replace arrays
    for frame in range(self.max_frames):
        annotated = self.tracked[frame]
        annotated = np.where(annotated == label_2, label_1, annotated)
        self.tracked[frame] = annotated
    # replace fields
    track_1 = self.tracks[label_1]
    track_2 = self.tracks[label_2]
    # orphan label_1's previous daughters before adopting label_2's
    for d in track_1["daughters"]:
        self.tracks[d]["parent"] = None
    track_1["frames"].extend(track_2["frames"])
    track_1["frames"] = sorted(set(track_1["frames"]))
    track_1["daughters"] = track_2["daughters"]
    track_1["frame_div"] = track_2["frame_div"]
    track_1["capped"] = track_2["capped"]
    del self.tracks[label_2]
    # scrub dangling daughter references to the removed label
    for _, track in self.tracks.items():
        try:
            track["daughters"].remove(label_2)
        except ValueError:
            pass
    self.frames_changed = self.info_changed = True
def action_swap_single_frame(self, label_1, label_2, frame):
    '''swap the labels of two cells in one frame, but do not
    change any of the lineage information'''
    frame_data = self.tracked[frame,:,:,0]
    # three-step swap through a -1 sentinel that cannot collide with a label
    frame_data = np.where(frame_data == label_1, -1, frame_data)
    frame_data = np.where(frame_data == label_2, label_1, frame_data)
    frame_data = np.where(frame_data == -1, label_2, frame_data)
    self.tracked[frame,:,:,0] = frame_data
    self.frames_changed = True
def action_swap_tracks(self, label_1, label_2):
    """Exchange two labels everywhere: pixel data in every frame plus the
    lineage records and any parent/daughter references to them."""
    def relabel(old_label, new_label):
        # rewrite the pixels in every frame
        for frame in self.tracked:
            frame[frame == old_label] = new_label
        # replace fields: move the lineage record to the new key
        track_new = self.tracks[new_label] = self.tracks[old_label]
        track_new["label"] = new_label
        del self.tracks[old_label]
        # keep the lineage links of relatives pointing at the moved label
        for d in track_new["daughters"]:
            self.tracks[d]["parent"] = new_label
        if track_new["parent"] is not None:
            parent_track = self.tracks[track_new["parent"]]
            parent_track["daughters"].remove(old_label)
            parent_track["daughters"].append(new_label)
    # -1 is a temporary sentinel so the two relabels do not collide
    relabel(label_1, -1)
    relabel(label_2, label_1)
    relabel(-1, label_2)
    self.frames_changed = self.info_changed = True
def action_watershed(self, label, frame, x1_location, y1_location, x2_location, y2_location):
    """Split one cell into two with a watershed transform seeded at the two
    click locations; the second region receives a brand-new label. The
    transform runs only inside the cell's bounding box for speed."""
    # Pull the label that is being split and find a new valid label
    current_label = label
    new_label = max(self.tracks) + 1
    # Locally store the frames to work on
    img_raw = self.raw[frame,:,:,0]
    img_ann = self.tracked[frame,:,:,0]
    # Pull the 2 seed locations and store locally
    # define a new seeds labeled img that is the same size as raw/annotation imgs
    seeds_labeled = np.zeros(img_ann.shape)
    # create two seed locations (click coords scaled into array coords)
    seeds_labeled[int(y1_location/self.scale_factor),
                  int(x1_location/self.scale_factor)] = current_label
    seeds_labeled[int(y2_location/self.scale_factor),
                  int(x2_location/self.scale_factor)] = new_label
    # define the bounding box to apply the transform on and select appropriate sections of 3 inputs (raw, seeds, annotation mask)
    props = regionprops(np.squeeze(np.int32(img_ann == current_label)))
    minr, minc, maxr, maxc = props[0].bbox
    # store these subsections to run the watershed on
    img_sub_raw = np.copy(img_raw[minr:maxr, minc:maxc])
    img_sub_ann = np.copy(img_ann[minr:maxr, minc:maxc])
    img_sub_seeds = np.copy(seeds_labeled[minr:maxr, minc:maxc])
    # contrast adjust the raw image to assist the transform
    img_sub_raw_scaled = rescale_intensity(img_sub_raw)
    # apply watershed transform to the subsections
    ws = watershed(-img_sub_raw_scaled, img_sub_seeds, mask=img_sub_ann.astype(bool))
    # did watershed effectively create a new label?
    new_pixels = np.count_nonzero(np.logical_and(ws == new_label, img_sub_ann == current_label))
    # if only a few pixels split, dilate them; new label is "brightest"
    # so will expand over other labels and increase area
    if new_pixels < 5:
        ws = dilation(ws, disk(3))
    # ws may only leave a few pixels of old label
    old_pixels = np.count_nonzero(ws == current_label)
    if old_pixels < 5:
        # create dilation image so "dimmer" label is not eroded by "brighter" label
        dilated_ws = dilation(np.where(ws==current_label, ws, 0), disk(3))
        ws = np.where(dilated_ws==current_label, dilated_ws, ws)
    # only update img_sub_ann where ws has changed label from current_label to new_label
    img_sub_ann = np.where(np.logical_and(ws == new_label,img_sub_ann == current_label),
                           ws, img_sub_ann)
    #reintegrate subsection into original mask
    img_ann[minr:maxr, minc:maxc] = img_sub_ann
    self.tracked[frame,:,:,0] = img_ann
    #update cell_info dict only if new label was created with ws
    if np.any(np.isin(self.tracked[frame,:,:,0], new_label)):
        self.add_cell_info(add_label=new_label, frame = frame)
def action_save_track(self):
    """Pack lineage + raw + tracked arrays into a .trk tarball named after
    self.filename and upload it to the output S3 bucket. Returns 'Success!'
    on completion; re-raises any upload failure after logging it."""
    # clear any empty tracks before saving file
    empty_tracks = []
    for key in self.tracks:
        if not self.tracks[key]['frames']:
            empty_tracks.append(self.tracks[key]['label'])
    for track in empty_tracks:
        del self.tracks[track]
    # sanitise the user-supplied filename before writing to disk
    file = secure_filename(self.filename)
    with tarfile.open(file, "w") as trks:
        # NOTE(review): re-opening a NamedTemporaryFile by name (trks.add)
        # works on POSIX but not on Windows -- confirm deployment target.
        with tempfile.NamedTemporaryFile("w") as lineage_file:
            json.dump(self.tracks, lineage_file, indent=1)
            lineage_file.flush()
            trks.add(lineage_file.name, "lineage.json")
        with tempfile.NamedTemporaryFile() as raw_file:
            np.save(raw_file, self.raw)
            raw_file.flush()
            trks.add(raw_file.name, "raw.npy")
        with tempfile.NamedTemporaryFile() as tracked_file:
            np.save(tracked_file, self.tracked)
            tracked_file.flush()
            trks.add(tracked_file.name, "tracked.npy")
    try:
        # upload the finished tarball to the configured output bucket/key
        s3.upload_file(file, self.output_bucket, self.subfolders)
    except Exception as e:
        print("Something Happened: ", e, file=sys.stderr)
        raise
    #os.remove(file)
    return "Success!"
def add_cell_info(self, add_label, frame):
    '''
    helper function for actions that add a cell to the trk

    If the label already has a lineage record, the frame is merged into its
    sorted, de-duplicated frame list; otherwise a fresh record is created.
    '''
    try:
        # label already exists elsewhere in the trk: record this frame once
        known_frames = self.tracks[add_label]['frames']
        merged_frames = np.unique(np.append(known_frames, frame)).tolist()
        self.tracks[add_label].update({'frames': merged_frames})
    except KeyError:
        # first appearance anywhere: create a fresh lineage record
        self.tracks[add_label] = {
            'label': int(add_label),
            'frames': [frame],
            'daughters': [],
            'frame_div': None,
            'parent': None,
            'capped': False,
        }
    self.frames_changed = self.info_changed = True
def del_cell_info(self, del_label, frame):
    '''
    helper function for actions that remove a cell from the trk

    Drops `frame` from the label's frame list; when no frames remain the
    whole record is deleted and every parent/daughter reference to it is
    scrubbed from the surviving tracks.
    '''
    record = self.tracks[del_label]
    remaining = np.delete(record['frames'], np.where(record['frames'] == np.int64(frame))).tolist()
    record.update({'frames': remaining})
    # if that was the last frame, delete the entry for that cell
    if record['frames'] == []:
        del self.tracks[del_label]
        # scrub lineage references to the removed label
        for _, survivor in self.tracks.items():
            try:
                survivor["daughters"].remove(del_label)
            except ValueError:
                pass
            if survivor["parent"] == del_label:
                survivor["parent"] = None
    self.frames_changed = self.info_changed = True
def consecutive(data, stepsize=1):
    """Split a 1-D array into runs whose successive elements differ by stepsize."""
    breakpoints = np.where(np.diff(data) != stepsize)[0] + 1
    return np.split(data, breakpoints)
def predict_zstack_cell_ids(img, next_img, threshold = 0.1):
'''
Predict labels for next_img based on intersection over union | |
# gh_stars: 1-10 -- scrape artifact converted to a comment
import os
import random
import shutil
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
def get_data_from_tf(tf_path, imf_norm_op):
    """Read all records of a CLAM TFRecord file.

    Args:
        tf_path: path to a TFRecord file following the feature schema below.
        imf_norm_op: if truthy, L2-normalise each image feature tensor.

    Returns:
        (image_features, slide_label): a list with one feature tensor per
        record, and the slide-level label.
        NOTE(review): slide_label is whatever the *last* record carried --
        this assumes every record in a file shares one label, and raises
        NameError on an empty file; confirm against the data pipeline.
    """
    # schema of one serialized example
    feature = {'height': tf.io.FixedLenFeature([], tf.int64),
               'width': tf.io.FixedLenFeature([], tf.int64),
               'depth': tf.io.FixedLenFeature([], tf.int64),
               'label': tf.io.FixedLenFeature([], tf.int64),
               'image/format': tf.io.FixedLenFeature([], tf.string),
               'image_name': tf.io.FixedLenFeature([], tf.string),
               'image/encoded': tf.io.FixedLenFeature([], tf.string),
               'image_feature': tf.io.FixedLenFeature([], tf.string)}
    tfrecord_dataset = tf.data.TFRecordDataset(tf_path)
    def _parse_image_function(key):
        # deserialize one example against the schema above
        return tf.io.parse_single_example(key, feature)
    CLAM_dataset = tfrecord_dataset.map(_parse_image_function)
    image_features = list()
    for tfrecord_value in CLAM_dataset:
        # the feature tensor itself is stored serialized inside the example
        img_feature = tf.io.parse_tensor(tfrecord_value['image_feature'], 'float32')
        if imf_norm_op:
            img_feature = tf.math.l2_normalize(img_feature)
        slide_labels = tfrecord_value['label']
        slide_label = int(slide_labels)
        image_features.append(img_feature)
    return image_features, slide_label
def most_frequent(List):
    """Return the most common element of List (ties broken by set order)."""
    winner = None
    winner_count = -1
    for candidate in set(List):
        occurrences = List.count(candidate)
        if occurrences > winner_count:
            winner, winner_count = candidate, occurrences
    return winner
def tf_shut_up(no_warn_op=False):
    """Raise TensorFlow's logger threshold to ERROR when no_warn_op is truthy;
    otherwise print a reminder and leave the logger untouched."""
    if not no_warn_op:
        print('Are you sure you want to receive the annoying TensorFlow Warning Messages?', \
              '\n', 'If not, check the value of your input prameter for this function and re-run it.')
        return
    tf.get_logger().setLevel('ERROR')
def optimizer_func_options(weight_decay_op_name):
    """Return the name -> constructor map for the requested optimizer family.

    Args:
        weight_decay_op_name: the *string* 'True' or 'False'; 'True' keeps
            only the decoupled-weight-decay optimizers (tfa AdamW, SGDW,
            LAMB, NovoGrad, RectifiedAdam), 'False' only the plain ones.
    """
    str_bool_dic = str_to_bool()
    weight_decay_op = str_bool_dic[weight_decay_op_name]
    # key groups to discard depending on which family was requested
    wd_keys = ["AdamW", "SGDW", "LAMB", "NovoGrad", "RectifiedAdam"]
    nwd_keys = ["ConditionalGradient", "LazyAdam", "ProximalAdagrad", "Yogi", "Adam",
                "Adadelta", "Adagrad", "Adamax", "Ftrl", "Nadam", "RMSprop", "SGD"]
    optimizer_func_dic = {"AdamW": tfa.optimizers.AdamW,
                          "SGDW": tfa.optimizers.SGDW,
                          "LAMB": tfa.optimizers.LAMB,
                          "NovoGrad": tfa.optimizers.NovoGrad,
                          "RectifiedAdam": tfa.optimizers.RectifiedAdam,
                          "ConditionalGradient": tfa.optimizers.ConditionalGradient,
                          "LazyAdam": tfa.optimizers.LazyAdam,
                          "ProximalAdagrad": tfa.optimizers.ProximalAdagrad,
                          "Yogi": tfa.optimizers.Yogi,
                          "Adam": tf.keras.optimizers.Adam,
                          "Adadelta": tf.keras.optimizers.Adadelta,
                          "Adagrad": tf.keras.optimizers.Adagrad,
                          "Adamax": tf.keras.optimizers.Adamax,
                          "Ftrl": tf.keras.optimizers.Ftrl,
                          "Nadam": tf.keras.optimizers.Nadam,
                          "RMSprop": tf.keras.optimizers.RMSprop,
                          "SGD": tf.keras.optimizers.SGD}
    # NOTE(review): list comprehensions used purely for their pop() side
    # effect -- a plain for loop would be the idiomatic form.
    if weight_decay_op:
        [optimizer_func_dic.pop(key) for key in nwd_keys]
    else:
        [optimizer_func_dic.pop(key) for key in wd_keys]
    return optimizer_func_dic
def loss_func_options():
    """Return the name -> callable map of supported loss functions
    (tf.keras.losses plus a few TensorFlow Addons losses)."""
    loss_func_dic = {"binary_crossentropy": tf.keras.losses.binary_crossentropy,
                     "hinge": tf.keras.losses.hinge,
                     "categorical_crossentropy": tf.keras.losses.categorical_crossentropy,
                     "categorical_hinge": tf.keras.losses.categorical_hinge,
                     "cosine_similarity": tf.keras.losses.cosine_similarity,
                     "huber": tf.keras.losses.huber,
                     "log_cosh": tf.keras.losses.log_cosh,
                     "poisson": tf.keras.losses.poisson,
                     "squared_hinge": tf.keras.losses.squared_hinge,
                     "contrastive": tfa.losses.contrastive_loss,
                     "pinball": tfa.losses.pinball_loss,
                     "sigmoid_focal_crossentropy": tfa.losses.sigmoid_focal_crossentropy}
    return loss_func_dic
def _build_optimizer(wd_op_name, optimizer_name, learn_rate, l2_decay):
    """Resolve one optimizer by name and instantiate it.

    Weight-decay optimizers receive the decay under the keyword each
    implementation expects: LAMB uses `weight_decay_rate`, the other tfa
    decoupled-weight-decay optimizers use `weight_decay`; plain optimizers
    get only the learning rate.
    """
    use_weight_decay = str_to_bool()[wd_op_name]
    optimizer_func = optimizer_func_options(weight_decay_op_name=wd_op_name)[optimizer_name]
    if not use_weight_decay:
        return optimizer_func(learning_rate=learn_rate)
    if optimizer_name == 'LAMB':
        return optimizer_func(learning_rate=learn_rate, weight_decay_rate=l2_decay)
    return optimizer_func(learning_rate=learn_rate, weight_decay=l2_decay)

def load_optimizers(i_wd_op_name, b_wd_op_name, a_wd_op_name,
                    i_optimizer_name, b_optimizer_name, a_optimizer_name,
                    i_learn_rate, b_learn_rate, a_learn_rate,
                    i_l2_decay, b_l2_decay, a_l2_decay):
    """Build the instance- ('i'), bag- ('b') and attention- ('a') branch
    optimizers.

    The original body repeated the same resolve-and-instantiate logic three
    times; it is factored into _build_optimizer.

    Returns:
        (i_optimizer, b_optimizer, c_optimizer) -- the attention branch is
        historically named 'c' in the return, matching existing callers.
    """
    i_optimizer = _build_optimizer(i_wd_op_name, i_optimizer_name, i_learn_rate, i_l2_decay)
    b_optimizer = _build_optimizer(b_wd_op_name, b_optimizer_name, b_learn_rate, b_l2_decay)
    c_optimizer = _build_optimizer(a_wd_op_name, a_optimizer_name, a_learn_rate, a_l2_decay)
    return i_optimizer, b_optimizer, c_optimizer
def load_loss_func(i_loss_func_name, b_loss_func_name):
    """Look up the instance- and bag-level loss functions by name."""
    options = loss_func_options()
    return options[i_loss_func_name], options[b_loss_func_name]
def str_to_bool():
    """Map the literal strings 'True'/'False' to their boolean values."""
    return {'True': True, 'False': False}
def dataset_shuffle(dataset, path, percent):
    """
    Randomly split the TFRecord files in `dataset` into train/valid/test
    sub-directories of `path`.

    Input Arg:
        dataset -> path where all tfrecord data stored
        path -> path where you want to save training, testing, and validation data folder
        percent -> (train_fraction, valid_fraction); everything left over
            becomes the test split

    Files are copied (not moved), and the split only runs when all three
    output directories are empty, so re-running is a no-op.
    """
    # return training, validation, and testing path name
    train = path + '/train'
    valid = path + '/valid'
    test = path + '/test'
    # create training, validation, and testing directory only if it is not existed
    if not os.path.exists(train):
        os.mkdir(os.path.join(path, 'train'))
    if not os.path.exists(valid):
        os.mkdir(os.path.join(path, 'valid'))
    if not os.path.exists(test):
        os.mkdir(os.path.join(path, 'test'))
    total_num_data = len(os.listdir(dataset))
    # only shuffle the data when train, validation, and test directory are all empty
    # BUG FIX: the original chained bitwise '&' between comparisons; operator
    # precedence turned the check into (effectively) "train dir empty" only.
    if len(os.listdir(train)) == 0 and len(os.listdir(valid)) == 0 and len(os.listdir(test)) == 0:
        train_names = random.sample(os.listdir(dataset), int(total_num_data * percent[0]))
        for name in train_names:
            shutil.copy(os.path.join(dataset, name), train)
        # validation files are drawn from whatever was not picked for training
        valid_names = random.sample(list(set(os.listdir(dataset)) - set(os.listdir(train))),
                                    int(total_num_data * percent[1]))
        for name in valid_names:
            shutil.copy(os.path.join(dataset, name), valid)
        # the remainder becomes the test split
        test_names = list(set(os.listdir(dataset)) - set(os.listdir(train)) - set(os.listdir(valid)))
        for name in test_names:
            shutil.copy(os.path.join(dataset, name), test)
def ng_att_call(ng_att_net, img_features):
    """Run the non-gated attention network over a bag of image features.

    ng_att_net is a (feature_layer, attention_layer) pair. Returns (h, A):
    the compressed per-instance features and their attention scores.
    """
    h = [ng_att_net[0](feature) for feature in img_features]
    A = [ng_att_net[1](compressed) for compressed in h]
    return h, A
def g_att_call(g_att_net, img_features):
    """Run the gated attention network over a bag of image features.

    g_att_net is (feature_layer, gate_V, gate_U, scorer): features are
    compressed by the first layer, the elementwise product of the two gate
    branches is scored by the final layer. Returns (h, A).
    """
    h = [g_att_net[0](feature) for feature in img_features]
    A = []
    for compressed in h:
        gate_v = g_att_net[1](compressed)
        gate_u = g_att_net[2](compressed)
        A.append(g_att_net[3](tf.math.multiply(gate_v, gate_u)))
    return h, A
def generate_pos_labels(n_pos_sample):
    # 1-D int tensor of n_pos_sample ones (positive instance pseudo-labels)
    return tf.fill(dims=[n_pos_sample, ], value=1)
def generate_neg_labels(n_neg_sample):
    # 1-D int tensor of n_neg_sample zeros (negative instance pseudo-labels)
    return tf.fill(dims=[n_neg_sample, ], value=0)
def ins_in_call(ins_classifier, h, A_I, top_k_percent, n_class):
    """Instance-level forward pass for the bag's own class.

    The top-k attention-scored instances are taken as pseudo-positives and
    the bottom-k as pseudo-negatives, then each selected instance feature is
    classified by ins_classifier.

    Returns:
        (ins_label_in, logits_unnorm_in, logits_in): concatenated pseudo
        labels, raw classifier outputs, and their softmaxes.
    """
    # k = fraction of the bag size, rounded down
    n_ins = top_k_percent * len(h)
    n_ins = int(n_ins)
    pos_label = generate_pos_labels(n_pos_sample=n_ins)
    neg_label = generate_neg_labels(n_neg_sample=n_ins)
    ins_label_in = tf.concat(values=[pos_label, neg_label], axis=0)
    A_I = tf.reshape(tf.convert_to_tensor(A_I), (1, len(A_I)))
    # indices of the k highest attention scores
    top_pos_ids = tf.math.top_k(A_I, n_ins)[1][-1]
    pos_index = list()
    for i in top_pos_ids:
        pos_index.append(i)
    pos_index = tf.convert_to_tensor(pos_index)
    top_pos = list()
    for i in pos_index:
        top_pos.append(h[i])
    # indices of the k lowest attention scores (top-k of the negated scores)
    top_neg_ids = tf.math.top_k(-A_I, n_ins)[1][-1]
    neg_index = list()
    for i in top_neg_ids:
        neg_index.append(i)
    neg_index = tf.convert_to_tensor(neg_index)
    top_neg = list()
    for i in neg_index:
        top_neg.append(h[i])
    ins_in = tf.concat(values=[top_pos, top_neg], axis=0)
    logits_unnorm_in = list()
    logits_in = list()
    # NOTE(review): this loop runs n_class * n_ins times, but ins_in only
    # holds 2 * n_ins entries -- correct only when n_class == 2; confirm
    # that callers never pass more classes.
    for i in range(n_class * n_ins):
        ins_score_unnorm_in = ins_classifier(ins_in[i])
        logit_in = tf.math.softmax(ins_score_unnorm_in)
        logits_unnorm_in.append(ins_score_unnorm_in)
        logits_in.append(logit_in)
    return ins_label_in, logits_unnorm_in, logits_in
def ins_out_call(ins_classifier, h, A_O, top_k_percent):
    """Out-of-the-class instance-level branch (mutual exclusivity).

    Under the mutually-exclusive assumption, the instances with the highest
    attention for a non-bag class are false positives, so they are labeled
    negative before being scored by ``ins_classifier``.

    :param ins_classifier: instance classifier of the out-of-bag class.
    :param h: list of compressed instance feature vectors.
    :param A_O: per-instance attention scores for the out-of-bag class.
    :param top_k_percent: fraction of instances to sample.
    :returns: tuple (instance labels, unnormalized scores, softmaxed scores).
    """
    n_ins = int(top_k_percent * len(h))
    # get compressed 512-dimensional instance-level feature vectors for following use, denoted by h
    A_O = tf.reshape(tf.convert_to_tensor(A_O), (1, len(A_O)))
    pos_index = tf.convert_to_tensor([idx for idx in tf.math.top_k(A_O, n_ins)[1][-1]])
    top_pos = [h[idx] for idx in pos_index]
    # mutually-exclusive -> top k instances w/ highest attention scores ==> false pos = neg
    ins_label_out = generate_neg_labels(n_neg_sample=n_ins)
    logits_unnorm_out = []
    logits_out = []
    for k in range(n_ins):
        unnorm = ins_classifier(top_pos[k])
        logits_unnorm_out.append(unnorm)
        logits_out.append(tf.math.softmax(unnorm))
    return ins_label_out, logits_unnorm_out, logits_out
def ins_call(m_ins_classifier, bag_label, h, A, n_class, top_k_percent, mut_ex):
    """Instance-level MIL evaluation across all classes.

    For the class matching ``bag_label`` the in-the-class branch
    (``ins_in_call``) is run; for every other class the out-of-the-class
    branch (``ins_out_call``) is run only when ``mut_ex`` is True.

    :param m_ins_classifier: sequence of per-class instance classifiers.
    :param bag_label: integer slide/bag label.
    :param h: list of compressed instance feature vectors.
    :param A: attention scores, indexed as ``A[j][0][i]`` for instance j, class i.
    :param n_class: number of classes.
    :param top_k_percent: fraction of instances sampled per branch.
    :param mut_ex: if True, include out-of-class instance results.
    :returns: tuple (ins_labels, ins_logits_unnorm, ins_logits).

    NOTE(review): if ``bag_label`` is not in ``range(n_class)`` the in-branch
    variables are never assigned and the final block raises
    UnboundLocalError — confirm callers always pass a valid label.
    NOTE(review): with ``mut_ex`` and ``n_class > 2`` only the LAST
    out-of-class iteration's results survive the loop — verify intended.
    """
    for i in range(n_class):
        ins_classifier = m_ins_classifier[i]
        if i == bag_label:
            # In-the-class branch: collect class-i attention for every instance.
            A_I = list()
            for j in range(len(A)):
                a_i = A[j][0][i]
                A_I.append(a_i)
            ins_label_in, logits_unnorm_in, logits_in = ins_in_call(ins_classifier=ins_classifier,
                                                                    h=h, A_I=A_I,
                                                                    top_k_percent=top_k_percent,
                                                                    n_class=n_class)
        else:
            if mut_ex:
                # Out-of-the-class branch under the mutual-exclusivity assumption.
                A_O = list()
                for j in range(len(A)):
                    a_o = A[j][0][i]
                    A_O.append(a_o)
                ins_label_out, logits_unnorm_out, logits_out = ins_out_call(ins_classifier=ins_classifier,
                                                                            h=h, A_O=A_O,
                                                                            top_k_percent=top_k_percent)
            else:
                continue
    if mut_ex:
        # Combine in-class and out-of-class labels/logits.
        ins_labels = tf.concat(values=[ins_label_in, ins_label_out], axis=0)
        ins_logits_unnorm = logits_unnorm_in + logits_unnorm_out
        ins_logits = logits_in + logits_out
    else:
        ins_labels = ins_label_in
        ins_logits_unnorm = logits_unnorm_in
        ins_logits = logits_in
    return ins_labels, ins_logits_unnorm, ins_logits
def s_bag_h_slide(A, h):
    """Aggregate instance features into a slide-level representation.

    Each instance's feature vector is weighted by its attention scores
    (matmul of transposed attention with the feature), and the weighted
    terms are summed over all instances.

    :param A: list of per-instance attention tensors.
    :param h: list of per-instance compressed feature tensors.
    :returns: the slide-level representation h_[slide,m] (shape (2, 512)
        per the original author's comment).
    """
    weighted = [tf.linalg.matmul(tf.transpose(A[i]), h[i]) for i in range(len(A))]
    return tf.math.add_n(weighted)
def s_bag_call(bag_classifier, bag_label, A, h, n_class):
    """Bag-level (slide-level) classification for the single-branch model.

    :param bag_classifier: slide-level classifier network.
    :param bag_label: ground-truth slide label (int).
    :param A: list of per-instance attention tensors.
    :param h: list of per-instance compressed feature tensors.
    :param n_class: number of classes.
    :returns: tuple (slide_score_unnorm, Y_hat, Y_prob,
        predict_slide_label, Y_true).
    """
    slide_agg_rep = s_bag_h_slide(A=A, h=h)
    slide_score_unnorm = bag_classifier(slide_agg_rep)
    slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, n_class))
    Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
    # scores are already (1, n_class); the second reshape was redundant
    Y_prob = tf.math.softmax(slide_score_unnorm)  # per-class predictions
    predict_slide_label = np.argmax(Y_prob.numpy())
    # Fix: one-hot depth was hard-coded to 2 even though the function takes
    # n_class; generalized so n_class != 2 works (backward compatible).
    Y_true = tf.one_hot([bag_label], n_class)
    return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true
def m_bag_h_slide(A, h, dim_compress_features, n_class):
    """Per-class attention-weighted aggregation for the multi-branch model.

    :param A: list of per-instance attention tensors.
    :param h: list of per-instance compressed feature tensors.
    :param dim_compress_features: width of the compressed feature vectors.
    :param n_class: number of classes / branches.
    :returns: list of ``n_class`` tensors, each of shape
        (1, dim_compress_features) — one slide-level representation per branch.
    """
    # attention-weighted instance terms (one (n_class, dim) tensor per instance)
    weighted = [tf.linalg.matmul(tf.transpose(A[i]), h[i]) for i in range(len(A))]
    slide_agg_rep = []
    for cls in range(n_class):
        # take this class's row from every instance term and sum them
        branch = [tf.reshape(w[cls], (1, dim_compress_features)) for w in weighted]
        slide_agg_rep.append(tf.math.add_n(branch))
    return slide_agg_rep
def m_bag_call(m_bag_classifier, bag_label, A, h, n_class, dim_compress_features):
    """Bag-level (slide-level) classification for the multi-branch model.

    :param m_bag_classifier: sequence of per-class slide-level classifiers.
    :param bag_label: ground-truth slide label (int).
    :param A: list of per-instance attention tensors.
    :param h: list of per-instance compressed feature tensors.
    :param n_class: number of classes.
    :param dim_compress_features: width of the compressed feature vectors.
    :returns: tuple (slide_score_unnorm, Y_hat, Y_prob,
        predict_slide_label, Y_true).
    """
    slide_agg_rep = m_bag_h_slide(A=A, h=h, dim_compress_features=dim_compress_features, n_class=n_class)
    ssus = list()
    # return s_[slide,m] (slide-level prediction scores), one per class branch
    for i in range(n_class):
        bag_classifier = m_bag_classifier[i]
        ssu = bag_classifier(slide_agg_rep[i])
        ssus.append(ssu[0][0])
    slide_score_unnorm = tf.convert_to_tensor(ssus)
    slide_score_unnorm = tf.reshape(slide_score_unnorm, (1, n_class))
    Y_hat = tf.math.top_k(slide_score_unnorm, 1)[1][-1]
    Y_prob = tf.math.softmax(slide_score_unnorm)
    predict_slide_label = np.argmax(Y_prob.numpy())
    # Fix: one-hot depth was hard-coded to 2 even though the function takes
    # n_class; generalized so n_class != 2 works (backward compatible).
    Y_true = tf.one_hot([bag_label], n_class)
    return slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true
def s_clam_call(att_net, ins_net, bag_net, img_features, slide_label,
                n_class, top_k_percent, att_gate, att_only, mil_ins, mut_ex):
    """Forward pass of the single-attention-branch CLAM model.

    :param att_net: attention network (gated or non-gated sub-networks).
    :param ins_net: per-class instance classifiers.
    :param bag_net: slide-level classifier.
    :param img_features: iterable of patch/instance feature vectors.
    :param slide_label: ground-truth slide label.
    :param n_class: number of classes.
    :param top_k_percent: fraction of instances used per instance branch.
    :param att_gate: use the gated attention network if True.
    :param att_only: return only the raw attention scores (e.g. for heatmaps).
    :param mil_ins: run the instance-level (MIL) branch.
    :param mut_ex: mutual-exclusivity flag forwarded to ins_call.

    NOTE(review): if ``mil_ins`` is False the ins_* variables are never
    assigned and the return statement raises NameError — confirm callers
    always pass mil_ins=True.
    """
    if att_gate:
        h, A = g_att_call(g_att_net=att_net, img_features=img_features)
    else:
        h, A = ng_att_call(ng_att_net=att_net, img_features=img_features)
    att_score = A  # output from attention network
    A = tf.math.softmax(A)  # softmax on attention scores
    if att_only:
        return att_score
    if mil_ins:
        ins_labels, ins_logits_unnorm, ins_logits = ins_call(m_ins_classifier=ins_net,
                                                             bag_label=slide_label,
                                                             h=h, A=A,
                                                             n_class=n_class,
                                                             top_k_percent=top_k_percent,
                                                             mut_ex=mut_ex)
    slide_score_unnorm, Y_hat, Y_prob, predict_slide_label, Y_true = s_bag_call(bag_classifier=bag_net,
                                                                                bag_label=slide_label,
                                                                                A=A, h=h, n_class=n_class)
    return att_score, A, h, ins_labels, ins_logits_unnorm, ins_logits, \
        slide_score_unnorm, Y_prob, Y_hat, Y_true, predict_slide_label
def m_clam_call(att_net, ins_net, bag_net, img_features, slide_label,
                n_class, dim_compress_features, top_k_percent, att_gate, att_only, mil_ins, mut_ex):
    """Forward pass of the multi-attention-branch CLAM model.

    Same flow as :func:`s_clam_call` but the bag-level step uses one
    classifier per class (``m_bag_call``).

    :param att_net: attention network (gated or non-gated sub-networks).
    :param ins_net: per-class instance classifiers.
    :param bag_net: sequence of per-class slide-level classifiers.
    :param img_features: iterable of patch/instance feature vectors.
    :param slide_label: ground-truth slide label.
    :param n_class: number of classes.
    :param dim_compress_features: width of the compressed feature vectors.
    :param top_k_percent: fraction of instances used per instance branch.
    :param att_gate: use the gated attention network if True.
    :param att_only: return only the raw attention scores.
    :param mil_ins: run the instance-level (MIL) branch.
    :param mut_ex: mutual-exclusivity flag forwarded to ins_call.

    NOTE(review): as in s_clam_call, mil_ins=False leads to a NameError at
    the return statement — confirm callers always pass mil_ins=True.
    """
    if att_gate:
        h, A = g_att_call(g_att_net=att_net, img_features=img_features)
    else:
        h, A = ng_att_call(ng_att_net=att_net, img_features=img_features)
    att_score = A  # output from attention network
    A = tf.math.softmax(A)  # softmax on attention scores
    if att_only:
        return att_score
    if mil_ins:
        ins_labels, ins_logits_unnorm, ins_logits = ins_call(m_ins_classifier=ins_net,
                                                             bag_label=slide_label,
                                                             h=h, A=A,
                                                             n_class=n_class,
                                                             top_k_percent=top_k_percent,
                                                             mut_ex=mut_ex)
    slide_score_unnorm, Y_hat, Y_prob, \
        predict_slide_label, Y_true = m_bag_call(m_bag_classifier=bag_net, bag_label=slide_label,
                                                 A=A, h=h, n_class=n_class,
                                                 dim_compress_features=dim_compress_features)
    return att_score, A, h, ins_labels, ins_logits_unnorm, ins_logits, \
        slide_score_unnorm, Y_prob, Y_hat, Y_true, predict_slide_label
def model_save(c_model, c_model_dir, n_class, m_clam_op, att_gate):
clam_model_names = ['_Att', '_Ins', '_Bag']
if m_clam_op:
if att_gate:
att_nets = c_model.clam_model()[0]
for m in range(len(att_nets)):
att_nets[m].save(os.path.join(c_model_dir, 'G' + clam_model_names[0], 'Model_' + str(m + 1)))
else:
| |
# Module containing functions to compute the SPIKE directionality and the
# spike train order profile
# Copyright 2015, <NAME> <<EMAIL>>
# Distributed under the BSD License
from __future__ import absolute_import
import numpy as np
import pyspike
from pyspike import DiscreteFunc
from functools import partial
from pyspike.generic import _generic_profile_multi
############################################################
# spike_directionality_values
############################################################
def spike_directionality_values(*args, **kwargs):
    """ Computes the spike directionality value for each spike in
    each spike train. Returns a list containing an array of spike
    directionality values for every given spike train.
    Valid call structures::
        spike_directionality_values(st1, st2)       # bi-variate profile
        spike_directionality_values(st1, st2, st3)  # multi-variate, 3 trains
        spike_trains = [st1, st2, st3, st4]         # list of spike trains
        spike_directionality_values(spike_trains)   # profile of the list
        spike_directionality_values(spike_trains, indices=[0, 1])  # subset
    Additional arguments:
    :param max_tau: Upper bound for coincidence window (default=None).
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)
    :returns: The spike directionality values :math:`D^n_i` as a list of arrays.
    """
    # A single positional argument is already a collection of spike trains;
    # otherwise treat the positional arguments themselves as the collection.
    spike_trains = args[0] if len(args) == 1 else args
    return _spike_directionality_values_impl(spike_trains, **kwargs)
def _spike_directionality_values_impl(spike_trains, indices=None,
                                      interval=None, max_tau=None):
    """ Computes the multi-variate spike directionality profile
    of the given spike trains.
    :param spike_trains: List of spike trains.
    :type spike_trains: List of :class:`pyspike.SpikeTrain`
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)
    :type indices: list or None
    :param interval: not supported; must be None.
    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
                    coincidence window has no upper bound.
    :returns: The spike-directionality values, one array per spike train.
    """
    if interval is not None:
        raise NotImplementedError("Parameter `interval` not supported.")
    if indices is None:
        indices = np.arange(len(spike_trains))
    indices = np.array(indices)
    # check validity of indices
    assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
        "Invalid index list."
    # list of arrays for resulting asymmetry values (same length as each train)
    asymmetry_list = [np.zeros_like(spike_trains[n].spikes) for n in indices]
    # generate a list of possible index pairs
    # NOTE(review): `i` enumerates positions within `indices` while `j` is an
    # actual index value; the accumulation below indexes `asymmetry_list`
    # with both, which is only consistent when indices == arange(N).
    # Verify behavior for custom index lists.
    pairs = [(indices[i], j) for i in range(len(indices))
             for j in indices[i+1:]]
    # cython implementation
    try:
        from .cython.cython_directionality import \
            spike_directionality_profiles_cython as profile_impl
    except ImportError:
        if not(pyspike.disable_backend_warning):
            print("Warning: spike_distance_cython not found. Make sure that \
PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
Falling back to slow python backend.")
        # use python backend
        from .cython.directionality_python_backend import \
            spike_directionality_profile_python as profile_impl
    if max_tau is None:
        # 0.0 signals "no upper bound" to the backends
        max_tau = 0.0
    for i, j in pairs:
        # accumulate pairwise directionality contributions for both trains
        d1, d2 = profile_impl(spike_trains[i].spikes, spike_trains[j].spikes,
                              spike_trains[i].t_start, spike_trains[i].t_end,
                              max_tau)
        asymmetry_list[i] += d1
        asymmetry_list[j] += d2
    # normalize by the number of partner trains (N-1)
    for a in asymmetry_list:
        a /= len(spike_trains)-1
    return asymmetry_list
############################################################
# spike_directionality
############################################################
def spike_directionality(spike_train1, spike_train2, normalize=True,
                         interval=None, max_tau=None):
    """ Computes the overall spike directionality of the first spike train with
    respect to the second spike train.
    :param spike_train1: First spike train.
    :type spike_train1: :class:`pyspike.SpikeTrain`
    :param spike_train2: Second spike train.
    :type spike_train2: :class:`pyspike.SpikeTrain`
    :param normalize: Normalize by the number of spikes (multiplicity).
    :param interval: not supported; must be None.
    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
                    coincidence window has no upper bound.
    :returns: The overall spike directionality value.
    """
    if interval is None:
        # distance over the whole interval is requested: use specific function
        # for optimal performance
        try:
            from .cython.cython_directionality import \
                spike_directionality_cython as spike_directionality_impl
            if max_tau is None:
                # 0.0 signals "no upper bound" to the backend
                max_tau = 0.0
            d = spike_directionality_impl(spike_train1.spikes,
                                          spike_train2.spikes,
                                          spike_train1.t_start,
                                          spike_train1.t_end,
                                          max_tau)
            c = len(spike_train1.spikes)
        except ImportError:
            if not(pyspike.disable_backend_warning):
                print("Warning: spike_distance_cython not found. Make sure that \
PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
Falling back to slow python backend.")
            # use profile.
            # Fallback: sum the per-spike directionality values of the first
            # train; the second profile array (x) is unused here.
            d1, x = spike_directionality_values([spike_train1, spike_train2],
                                                interval=interval,
                                                max_tau=max_tau)
            d = np.sum(d1)
            c = len(spike_train1.spikes)
        if normalize:
            # NOTE(review): raises ZeroDivisionError when spike_train1 has no
            # spikes — confirm callers guard against empty trains.
            return 1.0*d/c
        else:
            return d
    else:
        # some specific interval is provided: not yet implemented
        raise NotImplementedError("Parameter `interval` not supported.")
############################################################
# spike_directionality_matrix
############################################################
def spike_directionality_matrix(spike_trains, normalize=True, indices=None,
                                interval=None, max_tau=None):
    """ Computes the spike directionality matrix for the given spike trains.
    :param spike_trains: List of spike trains.
    :type spike_trains: List of :class:`pyspike.SpikeTrain`
    :param normalize: Normalize by the number of spikes (multiplicity).
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)
    :type indices: list or None
    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
                    coincidence window has no upper bound.
    :returns: antisymmetric matrix of pairwise spike-directionality values.
    """
    if indices is None:
        indices = np.arange(len(spike_trains))
    indices = np.array(indices)
    # check validity of indices
    assert (indices < len(spike_trains)).all() and (indices >= 0).all(), \
        "Invalid index list."
    distance_matrix = np.zeros((len(indices), len(indices)))
    # iterate over all unordered pairs; the matrix is antisymmetric
    for pos, i in enumerate(indices):
        for j in indices[pos + 1:]:
            d = spike_directionality(spike_trains[i], spike_trains[j],
                                     normalize, interval, max_tau=max_tau)
            distance_matrix[i, j] = d
            distance_matrix[j, i] = -d
    return distance_matrix
############################################################
# spike_train_order_profile
############################################################
def spike_train_order_profile(*args, **kwargs):
    """ Computes the spike train order profile :math:`E(t)` of the given
    spike trains. Returns the profile as a DiscreteFunction object.
    Valid call structures::
        spike_train_order_profile(st1, st2)       # bi-variate profile
        spike_train_order_profile(st1, st2, st3)  # multi-variate, 3 trains
        spike_trains = [st1, st2, st3, st4]       # list of spike trains
        spike_train_order_profile(spike_trains)   # profile of the list
        spike_train_order_profile(spike_trains, indices=[0, 1])  # subset
    Additional arguments:
    :param max_tau: Upper bound for coincidence window, `default=None`.
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)
    :returns: The spike train order profile :math:`E(t)`
    :rtype: :class:`.DiscreteFunction`
    """
    # exactly two trains -> dedicated bi-variate implementation
    if len(args) == 2:
        return spike_train_order_profile_bi(args[0], args[1], **kwargs)
    trains = args[0] if len(args) == 1 else args
    return spike_train_order_profile_multi(trains, **kwargs)
############################################################
# spike_train_order_profile_bi
############################################################
def spike_train_order_profile_bi(spike_train1, spike_train2, max_tau=None):
    """ Computes the spike train order profile P(t) of the two given
    spike trains. Returns the profile as a DiscreteFunction object.
    :param spike_train1: First spike train.
    :type spike_train1: :class:`pyspike.SpikeTrain`
    :param spike_train2: Second spike train.
    :type spike_train2: :class:`pyspike.SpikeTrain`
    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
                    coincidence window has no upper bound.
    :returns: The spike train order profile :math:`E(t)`.
    :rtype: :class:`pyspike.function.DiscreteFunction`
    """
    # check whether the spike trains are defined for the same interval
    assert spike_train1.t_start == spike_train2.t_start, \
        "Given spike trains are not defined on the same interval!"
    assert spike_train1.t_end == spike_train2.t_end, \
        "Given spike trains are not defined on the same interval!"
    # cython implementation
    try:
        from .cython.cython_directionality import \
            spike_train_order_profile_cython as \
            spike_train_order_profile_impl
    except ImportError:
        # raise NotImplementedError()
        if not(pyspike.disable_backend_warning):
            print("Warning: spike_distance_cython not found. Make sure that \
PySpike is installed by running\n 'python setup.py build_ext --inplace'!\n \
Falling back to slow python backend.")
        # use python backend
        from .cython.directionality_python_backend import \
            spike_train_order_profile_python as spike_train_order_profile_impl
    if max_tau is None:
        # 0.0 signals "no upper bound" to the backends
        max_tau = 0.0
    times, coincidences, multiplicity \
        = spike_train_order_profile_impl(spike_train1.spikes,
                                         spike_train2.spikes,
                                         spike_train1.t_start,
                                         spike_train1.t_end,
                                         max_tau)
    return DiscreteFunc(times, coincidences, multiplicity)
############################################################
# spike_train_order_profile_multi
############################################################
def spike_train_order_profile_multi(spike_trains, indices=None,
                                    max_tau=None):
    """ Computes the multi-variate spike train order profile for a set of
    spike trains. For each spike in the set of spike trains, the multi-variate
    profile is defined as the sum of asymmetry values divided by the number of
    spike train pairs involving the spike train containing this spike,
    which is the number of spike trains minus one (N-1).
    :param spike_trains: list of :class:`pyspike.SpikeTrain`
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)
    :type indices: list or None
    :param max_tau: Maximum coincidence window size. If 0 or `None`, the
                    coincidence window has no upper bound.
    :returns: The multi-variate spike train order profile
    :rtype: :class:`pyspike.function.DiscreteFunction`
    """
    # average the bi-variate profiles over all pairs; the pair count (second
    # return value of _generic_profile_multi) is not needed here
    pairwise_profile = partial(spike_train_order_profile_bi, max_tau=max_tau)
    average_profile, _ = _generic_profile_multi(spike_trains, pairwise_profile,
                                                indices)
    return average_profile
############################################################
# _spike_train_order_impl
############################################################
def _spike_train_order_impl(spike_train1, spike_train2,
interval=None, max_tau=None):
""" Implementation of bi-variatae spike train order value (Synfire Indicator).
:param spike_train1: First spike train.
:type spike_train1: :class:`pyspike.SpikeTrain`
:param spike_train2: Second spike train.
:type spike_train2: :class:`pyspike.SpikeTrain`
:param max_tau: Maximum coincidence window size. If 0 or `None`, the
coincidence window has no upper bound.
:returns: The spike train order value (Synfire Indicator)
"""
if interval is None:
# distance over the whole interval is requested: use specific function
# for optimal performance
try:
| |
<filename>pygot/dendroutils.py<gh_stars>1-10
#!/usr/bin/env python
import copy
import dendropy
#this deals with changes in DendroPy 4
from dendropy.utility import bitprocessing
'''
try:
from dendropy.calculate import treesplit
except ImportError:
from dendropy import treesplit
'''
def compat_encode_bipartitions(tree, **kwargs):
    '''Encode splits on `tree`, papering over DendroPy 3/4 API differences.

    If the tree exposes the DendroPy 4 `encode_bipartitions` method, the old
    `delete_outdegree_one` keyword is renamed to its DP4 equivalent
    `collapse_unrooted_basal_bifurcation` before the call. Otherwise the
    encoding is (re)computed only when no encoding is present yet.
    '''
    if hasattr(tree, "encode_bipartitions"):
        if 'delete_outdegree_one' in kwargs:
            # translate the DendroPy 3 keyword to its DendroPy 4 name
            kwargs['collapse_unrooted_basal_bifurcation'] = kwargs.pop('delete_outdegree_one')
        tree.encode_bipartitions(**kwargs)
    elif not getattr(tree, "bipartition_encoding", None):
        # no (or empty) encoding cached yet
        tree.encode_bipartitions(**kwargs)
class CustomTree(dendropy.Tree):
    '''dendropy.Tree subclass that treats trees with an RF distance of 0 as equal.'''
    def __eq__(self, other):
        # identity == topological identity (symmetric/RF difference of zero)
        return dendropy.treecalc.symmetric_difference(self, other) == 0
    def __hash__(self):
        # hash on the sorted newick characters so rotated but identical
        # newick strings hash alike
        return hash(''.join(sorted(self.as_newick_string())))
class CustomTreeList(dendropy.TreeList):
'''dendropy.TreeList with a number of functions overriden to tweak functionality
'''
def __contains__(self, item):
'''Overridden function to allow basic use of 'in' keyword.
Identity defined by RF distance of 0.
if treeX in treelistY:
treeX.something()
NOT very efficient
NOTE: Other cheaper pre-filtering steps could be added (e.g.
compare number of nodes) but these are dangerous because of
how Dendropy handles rooting of trees
'''
for t in self:
if dendropy.treecalc.symmetric_difference(t, item) == 0:
return True
return False
def frequency_of_identical_trees(self, targetTree):
'''Return the proportion of trees in self that match targetTree.
Identity defined by RF distance of 0.
See NOTE in __contains__ regarding efficiency
'''
count = 0
for tree in self:
if dendropy.treecalc.symmetric_difference(tree, targetTree) == 0:
count += 1
return float(count) / len(self)
#def masked_frequency_of_split(self, **kwargs):
def masked_frequency_of_bipartition(self, **kwargs):
"""Adaptation of dendropy.TreeList.frequency_of_bipartition that takes a taxon mask.
This allows identifying splits on a subset of taxa within a larger tree without
pruning any tree structures, which is much slower.
Given a split or bipartition specified as:
- a split bitmask given the keyword 'split_bitmask'
- a list of `Taxon` objects given with the keyword `taxa`
- a list of taxon labels given with the keyword `labels`
- a list of oids given with the keyword `oids`
this function returns the proportion of trees in self in which the
split is found.
"""
partialMask = kwargs["mask"] if "mask" in kwargs else self.taxon_namespace.all_taxa_bitmask()
if "split_bitmask" in kwargs:
targetSplit = kwargs["split_bitmask"]
else:
targetSplit = self.taxon_namespace.get_taxa_bitmask(**kwargs)
k = kwargs.values()[0]
if bitprocessing.num_set_bits(targetSplit) != len(k):
raise IndexError('Not all taxa could be mapped to split (%s): %s'
% (self.taxon_namespace.split_bitmask_string(targetSplit), k))
found = 0
total = 0
for tree in self:
tree.compat_encode_bipartitions()
total += 1
compSplit = (~targetSplit & partialMask)
#for test_split in tree.split_edges:
for test_split in tree.reference_tree.bipartition_encoding:
if not treesplit.is_compatible(test_split, targetSplit, partialMask):
break
masked_test = (test_split & partialMask)
if targetSplit == masked_test or compSplit == masked_test:
found += 1
break
return float(found) / total
def masked_frequency_of_splitlist(self, returnMatches=False, **kwargs):
"""As masked_frequency_of_split, but counts trees that contain a list of splits.
Given a LIST of splits or bipartitions specified as:
- a split bitmask given the keyword 'split_bitmask'
- a list of `Taxon` objects given with the keyword `taxa`
- a list of taxon labels given with the keyword `labels`
- a list of oids given with the keyword `oids`
this function returns the proportion of trees in self
in which all of the splits are found.
NOTE: This is not that useful in some cases here you call it sucessively with
different numbers of splits and expect the freqs to add up to 1.0
"""
#if returnMatches is requested, return matching trees
matches = []
partialMask = kwargs["mask"] if "mask" in kwargs else self.taxon_namespace.all_taxa_bitmask()
if "split_bitmask" in kwargs:
targetSplits = kwargs["split_bitmask"]
else:
split = self.taxon_namespace.get_taxa_bitmask(**kwargs)
k = kwargs.values()[0]
if treesplit.count_bits(split) != len(k):
raise IndexError('Not all taxa could be mapped to split (%s): %s'
% (self.taxon_namespace.split_bitmask_string(split), k))
found = 0
total = 0
for tnum, tree in enumerate(self):
compat_encode_bipartitions(tree)
total += 1
matchedSplits = 0
incompatible = False
#work through the required splits
for num, targetSplit in enumerate(targetSplits):
compSplit = (~targetSplit & partialMask)
#work through the splits in this tree
for test_split in tree.bipartition_encoding:
#mask out unimportant taxa
masked_test = (test_split.split_bitmask & partialMask)
#don't need to test anything if masked_test is empty
#(i.e., no taxa in partialMask appear on opposite sides of test_split
if masked_test:
#if not treesplit.is_compatible(test_split.split_bitmask, targetSplit, partialMask):
if not dendropy.Bipartition.is_compatible_bitmasks(test_split.split_bitmask, targetSplit, partialMask):
incompatible = True
break
elif targetSplit == masked_test or compSplit == masked_test:
matchedSplits += 1
break
if incompatible:
break
if not incompatible and matchedSplits == len(targetSplits):
found += 1
if returnMatches:
matches.append(copy.deepcopy(tree))
if returnMatches:
return float(found) / total, matches
else:
return float(found) / total
    def generate_all_trees_for_taxon_list(self, taxon_list, min_bipartitions=None, max_bipartitions=None, criterion=None):
        '''Will call functions to generate newick strings representing all possible trees for taxon set.
        Work must be done here to make that list fully unique allowing for differences in tree rotation.
        Can pass min and max bipartitions to control resolvedness of trees, or omit to only generate fully resolved.
        This is impractically slow for > 6 taxa.
        NOTE(review): the doctests instantiate MyTreeList, which is not defined
        in this module (presumably an older name for CustomTreeList) -- confirm
        before enabling doctest runs.
        >>> TL = MyTreeList()
        >>> TL.generate_all_trees_for_taxon_list(['a', 'b', 'c', 'd'])
        >>> len(TL)
        3
        >>> TL = MyTreeList()
        >>> TL.generate_all_trees_for_taxon_list(['a', 'b', 'c', 'd', 'e'])
        >>> len(TL)
        15
        #don't think that this works for trees with polytomies
        #>>> TL = MyTreeList()
        #>>> TL.generate_all_trees_for_taxon_list(['a', 'b', 'c', 'd', 'e'], min_bipartitions=0, max_bipartitions=2)
        #>>> len(TL)
        #26
        #>>> TL = MyTreeList()
        #>>> TL.generate_all_trees_for_taxon_list(['a', 'b', 'c', 'd', 'e', 'f'])
        #>>> len(TL)
        105
        #>>> TL = MyTreeList()
        #>>> TL.generate_all_trees_for_taxon_list(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
        #>>> len(TL)
        945
        #>>> TL = MyTreeList()
        #>>> TL.generate_all_trees_for_taxon_list(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
        #>>> len(TL)
        10395
        '''
        ntax = len(taxon_list)
        #default to fully resolved trees (a fully resolved unrooted tree has ntax - 3 internal branches)
        # NOTE(review): max(max_bipartitions, ntax - 3) raises any
        # user-supplied cap back up to fully resolved; min() looks intended --
        # confirm before relying on max_bipartitions < ntax - 3.
        min_bipartitions = max(min_bipartitions, 0) if min_bipartitions else ntax - 3
        max_bipartitions = max(max_bipartitions, ntax - 3) if max_bipartitions else ntax - 3
        # combine_components_and_uniqueify is defined elsewhere in this package
        componentLists = combine_components_and_uniqueify(taxon_list, min_components=ntax - max_bipartitions, max_components=ntax - min_bipartitions, criterion=criterion)
        fullString = ''
        for componentList in componentLists:
            #the component list is a list of tuples, so actually the repr is exactly newick representation
            fullString += repr(componentList) + ';'
        self.read_from_string(fullString, 'newick')
        #print len(self)
        #for t in self:
        #    print t.as_newick_string()
        # NOTE(review): the name TreeList is not bound in this module --
        # dendropy.TreeList (or CustomTreeList) is presumably intended; as
        # written this line raises NameError. Confirm and fix upstream.
        newList = TreeList(self, taxon_namespace=self.taxon_namespace)
        #print len(newList)
        self[:] = []
        # keep only rotation-unique trees (identity = RF distance 0, via __contains__)
        for num, tr in enumerate(newList):
            #if tr not in TreeList(self[num+1:]):
            if tr not in self:
                self.append(tr)
                #print tr.as_newick_string()
    def as_python_source(self, tree_list_name=None, tree_list_args=None, oids=False):
        """As dendropy.TreeList.as_python_source, but source instantiates a CustomTreeList.
        Returns string that will rebuild this tree list in Python.
        :param tree_list_name: variable name to use in the generated source
            (default: derived from id(self)).
        :param tree_list_args: extra constructor-argument text appended to the
            CustomTreeList(...) call, or None.
        :param oids: if True, also emit oid assignments for the list, taxa,
            trees, nodes, and edges.
        """
        p = []
        if tree_list_name is None:
            tree_list_name = "tree_list_%s" % id(self)
        if self.label is not None:
            label = "'" + self.label + "'"
        else:
            label = "None"
        if oids:
            oid_str = ', oid="%s"' % self.oid
        else:
            oid_str = ""
        if tree_list_args is None:
            tree_list_args = ""
        else:
            tree_list_args = ", " + tree_list_args
        # constructor line for the list itself (CustomTreeList, not TreeList)
        p.append("%s = CustomTreeList(label=%s%s%s)"
                 % (tree_list_name,
                    label,
                    oid_str,
                    tree_list_args))
        # one require_taxon line per taxon in the namespace
        taxon_obj_namer = lambda x: "tax_%s" % id(x)
        # taxon_map is populated for parity with the dendropy original but is
        # not read again below
        taxon_map = {}
        for taxon in self.taxon_namespace:
            tobj_name = taxon_obj_namer(taxon)
            if taxon.label is not None:
                label = "'" + taxon.label + "'"
            else:
                label = "None"
            if oids:
                oid_str = ', oid="%s"' % taxon.oid
            else:
                oid_str = ""
            p.append("%s = %s.taxon_namespace.require_taxon(label=%s%s)"
                     % (tobj_name,
                        tree_list_name,
                        label,
                        oid_str))
            taxon_map[taxon] = tobj_name
        node_obj_namer = lambda x: "nd_%s" % id(x)
        for tree in self:
            tree_obj_name = "tree_%s" % id(tree)
            if tree.label is not None:
                label = "'" + tree.label + "'"
            else:
                label = "None"
            if oids:
                oid_str = ', oid="%s"' % tree.oid
            else:
                oid_str = ""
            p.append("%s = dendropy.Tree(label=%s, taxon_namespace=%s.taxon_namespace%s)"
                     % (tree_obj_name,
                        label,
                        tree_list_name,
                        oid_str))
            p.append("%s.append(%s, reindex_taxa=False)" % (tree_list_name, tree_obj_name))
            if oids:
                p.append("%s.seed_node.oid = '%s'" % (tree_obj_name, tree.seed_node.oid))
            # walk parent-first so each parent variable exists before its
            # children's new_child(...) lines are emitted
            for node in tree.preorder_node_iter():
                for child in node.child_nodes():
                    if node is tree.seed_node:
                        nn = "%s.seed_node" % tree_obj_name
                    else:
                        nn = node_obj_namer(node)
                    if child.label is not None:
                        label = "'" + child.label + "'"
                    else:
                        label = "None"
                    if child.taxon is not None:
                        ct = taxon_obj_namer(child.taxon)
                    else:
                        ct = "None"
                    if oids:
                        oid_str = ', oid="%s"' % child.oid
                    else:
                        oid_str = ""
                    p.append("%s = %s.new_child(label=%s, taxon=%s, edge_length=%s%s)" %
                             (node_obj_namer(child),
                              nn,
                              label,
                              ct,
                              child.edge.length,
                              oid_str))
                    if oids:
                        p.append('%s.edge.oid = "%s"' % (node_obj_namer(child), child.edge.oid))
        return "\n".join(p)
'''
#haven't finished implmenting this yet
def resolve_polytomies(self, source_tree, update_splits=False, | |
<reponame>sukruthG/nimi-python<gh_stars>0
# -*- coding: utf-8 -*-
# This file was generated
import array # noqa: F401
import ctypes
# Used by @ivi_synchronized
from functools import wraps
import nidcpower._attributes as _attributes
import nidcpower._converters as _converters
import nidcpower._library_singleton as _library_singleton
import nidcpower._visatype as _visatype
import nidcpower.enums as enums
import nidcpower.errors as errors
import hightime
# Used for __repr__
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Helper functions for creating ctypes needed for calling into the driver DLL
def get_ctypes_pointer_for_buffer(value=None, library_type=None, size=None):
    """Return a ctypes pointer/array suitable for passing a buffer to the driver DLL.

    Dispatches on the type of `value`:
    - array.array: cast its existing buffer (requires `library_type`)
    - numpy.ndarray: wrap via numpy.ctypeslib without copying
    - bytes: cast directly to a pointer of `library_type`
    - list: build a new ctypes array of `library_type` from the elements
    - None with `library_type` and `size`: allocate a zeroed output buffer
    - otherwise: None
    """
    if isinstance(value, array.array):
        assert library_type is not None, 'library_type is required for array.array'
        address, _ = value.buffer_info()
        return ctypes.cast(address, ctypes.POINTER(library_type))
    if str(type(value)).find("'numpy.ndarray'") != -1:
        # imported lazily so numpy stays an optional dependency
        import numpy
        return numpy.ctypeslib.as_ctypes(value)
    if isinstance(value, bytes):
        return ctypes.cast(value, ctypes.POINTER(library_type))
    if isinstance(value, list):
        assert library_type is not None, 'library_type is required for list'
        return (library_type * len(value))(*value)
    if library_type is not None and size is not None:
        # no input data: allocate a zero-initialized output buffer
        return (library_type * size)()
    return None
def get_ctypes_and_array(value, array_type):
    """Normalize `value` into an array.array of typecode `array_type`.

    None passes through unchanged; an existing array.array is returned as-is;
    any other iterable is copied into a new array.array.
    """
    if value is None:
        return None
    if isinstance(value, array.array):
        return value
    return array.array(array_type, value)
class _Acquisition(object):
    """Context manager that starts an acquisition on construction and aborts it on exit."""
    def __init__(self, session):
        # session: the owning Session; the acquisition is initiated
        # immediately, not deferred to __enter__
        self._session = session
        self._session._initiate_with_channels()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # always abort the acquisition, even if the with-body raised
        self._session.abort()
# From https://stackoverflow.com/questions/5929107/decorators-with-parameters
def ivi_synchronized(f):
    """Decorator that runs the wrapped session method while holding the session lock.

    The first positional argument of the wrapped callable is treated as the
    session object ('self' on bound methods); its lock() context manager is
    entered for the duration of the call.
    """
    @wraps(f)
    def locked_call(*args, **kwargs):
        session = args[0]  # parameter 0 is 'self' which is the session object
        with session.lock():
            return f(*args, **kwargs)
    return locked_call
class _Lock(object):
    """Context manager returned by Session.lock(); unlocks the session on exit."""
    def __init__(self, session):
        self._session = session
    def __enter__(self):
        # _lock_session is called from the lock() function, not here
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # release the driver lock regardless of exceptions in the with-body
        self._session.unlock()
class _RepeatedCapabilities(object):
    """Indexable helper backing ``session.channels[...]`` style access.

    Indexing expands the requested repeated-capability expression and returns
    a new _SessionBase scoped to the resulting capability list.
    """
    def __init__(self, session, prefix, current_repeated_capability_list):
        self._session = session
        self._prefix = prefix
        # We need at least one element. If we get an empty list, make the one element an empty string
        self._current_repeated_capability_list = current_repeated_capability_list if len(current_repeated_capability_list) > 0 else ['']
        # Now we know there is at least one entry, so we look if it is an empty string or not
        self._separator = '/' if len(self._current_repeated_capability_list[0]) > 0 else ''
    def __getitem__(self, repeated_capability):
        '''Set/get properties or call methods with a repeated capability (i.e. channels)'''
        # expand the requested expression (ranges, lists) into individual names
        rep_caps_list = _converters.convert_repeated_capabilities(repeated_capability, self._prefix)
        # cross the existing scope with the newly requested capabilities
        complete_rep_cap_list = [current_rep_cap + self._separator + rep_cap for current_rep_cap in self._current_repeated_capability_list for rep_cap in rep_caps_list]
        return _SessionBase(vi=self._session._vi, repeated_capability_list=complete_rep_cap_list, library=self._session._library, encoding=self._session._encoding, freeze_it=True)
# This is a very simple context manager we can use when we need to set/get attributes
# or call functions from _SessionBase that require no channels. It is tied to the specific
# implementation of _SessionBase and how repeated capabilities are handled.
class _NoChannel(object):
def __init__(self, session):
self._session = session
def __enter__(self):
self._repeated_capability_cache = self._session._repeated_capability
self._session._repeated_capability = ''
def __exit__(self, exc_type, exc_value, traceback):
self._session._repeated_capability = self._repeated_capability_cache
class _SessionBase(object):
'''Base class for all NI-DCPower sessions.'''
# This is needed during __init__. Without it, __setattr__ raises an exception
_is_frozen = False
active_advanced_sequence = _attributes.AttributeViString(1150074)
'''Type: str
Specifies the advanced sequence to configure or generate.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].active_advanced_sequence`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.active_advanced_sequence`
'''
active_advanced_sequence_step = _attributes.AttributeViInt64(1150075)
'''Type: int
Specifies the advanced sequence step to configure.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].active_advanced_sequence_step`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.active_advanced_sequence_step`
'''
actual_power_allocation = _attributes.AttributeViReal64(1150205)
'''Type: float
Returns the power, in watts, the device is sourcing on each active channel if the power_allocation_mode property is set to PowerAllocationMode.AUTOMATIC or PowerAllocationMode.MANUAL.
Valid Values: [0, device per-channel maximum power]
Default Value: Refer to the Supported Properties by Device topic for the default value by device.
Note: This property is not supported by all devices. Refer to the Supported Properties by Device topic for information about supported devices.
This property returns -1 when the power_allocation_mode property is set to PowerAllocationMode.DISABLED.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].actual_power_allocation`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.actual_power_allocation`
'''
aperture_time = _attributes.AttributeViReal64(1150058)
'''Type: float
Specifies the measurement aperture time for the channel configuration. Aperture time is specified in the units set by the aperture_time_units property.
for information about supported devices.
Refer to the Aperture Time topic in the NI DC Power Supplies and SMUs Help for more information about how to configure your measurements and for information about valid values.
Default Value: 0.01666666 seconds
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].aperture_time`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.aperture_time`
'''
aperture_time_units = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ApertureTimeUnits, 1150059)
'''Type: enums.ApertureTimeUnits
Specifies the units of the aperture_time property for the channel configuration.
for information about supported devices.
Refer to the Aperture Time topic in the NI DC Power Supplies and SMUs Help for more information about how to configure your measurements and for information about valid values.
Default Value: ApertureTimeUnits.SECONDS
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].aperture_time_units`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.aperture_time_units`
'''
autorange = _attributes.AttributeViInt32(1150244)
'''Type: bool
Specifies whether the hardware automatically selects the best range to measure the signal. Note the highest range the algorithm uses is dependent on the corresponding limit range property. The algorithm the hardware uses can be controlled using the autorange_aperture_time_mode property.
Note: Autoranging begins at module startup and remains active until the module is reconfigured or reset. This property is not supported by all devices. Refer to Supported Properties by Device topic.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].autorange`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.autorange`
'''
autorange_aperture_time_mode = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.AutorangeApertureTimeMode, 1150246)
'''Type: enums.AutorangeApertureTimeMode
Specifies whether the aperture time used for the measurement autorange algorithm is determined automatically or customized using the autorange_minimum_aperture_time property.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].autorange_aperture_time_mode`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.autorange_aperture_time_mode`
'''
autorange_behavior = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.AutorangeBehavior, 1150245)
'''Type: enums.AutorangeBehavior
Specifies the algorithm the hardware uses for measurement autoranging.
Note: This property is not supported by all devices. Refer to Supported Properties by Device topic.
Tip:
This property can be set/get on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset.
Example: :py:attr:`my_session.channels[ ... ].autorange_behavior`
To set/get on all channels, you can call the property directly on the :py:class:`nidcpower.Session`.
Example: :py:attr:`my_session.autorange_behavior`
'''
autorange_minimum_aperture_time = _attributes.AttributeViReal64(1150247)
'''Type: float
Specifies the measurement autorange aperture time used for the measurement autorange algorithm. The aperture time is specified in the units set by the autorange_minimum_aperture_time_units property. | |
# tools/sampcd_processor.py (from the zhouwei25/Paddle repository)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import multiprocessing
import math
import platform
"""
please make sure to run in the tools path
usage: python sampcd_processor.py {arg1}
arg1: the first arg defined running in gpu version or cpu version
for example, you can run cpu version python2 testing like this:
python sampcd_processor.py cpu
"""
def find_all(srcstr, substr):
    """
    Find every occurrence (including overlapping ones) of a substring.

    Args:
        srcstr(str): the parent string to search in
        substr(str): the substring to look for

    Returns:
        list: the starting indices of each occurrence, in ascending order
    """
    indices = []
    start = 0
    while True:
        idx = srcstr.find(substr, start)
        if idx == -1:
            break
        indices.append(idx)
        # Advance by one (not len(substr)) so overlapping matches are found too.
        start = idx + 1
    return indices
def check_indent(cdline):
    """
    Measure the leading indentation of a code line.

    Tabs count as 4 blank columns; spaces count as 1. Scanning stops at the
    first character that is neither a space nor a tab.

    Args:
        cdline(str): a single line of code from the source file

    Returns:
        int: the indent width in interpreted blank columns
    """
    indent = 0
    for ch in cdline:
        if ch == '\t':
            indent += 4
        elif ch == ' ':
            indent += 1
        else:
            break
    return indent
# srccom: raw comments in the source,including ''' and original indent
def sampcd_extract_and_run(srccom, name, htype="def", hname=""):
    """
    Extract sample codes from a source comment and execute each of them.

    Every ".. code-block:: python" section found in ``srccom`` is dedented,
    written to a temp file under samplecode_temp/, and run in a subprocess
    with CUDA_VISIBLE_DEVICES set according to sys.argv[1] ("cpu" or "gpu").

    Args:
        srccom(str): the source comment of some API whose
            example codes will be extracted and run.
        name(str): the name of the API.
        htype(str): the type of hint banners, def/class/method.
        hname(str): the name of the hint banners, e.t. def hname.

    Returns:
        bool: True if the docstring is well formatted and every extracted
            sample code executed successfully, False otherwise.
    """
    result = True

    def sampcd_header_print(name, sampcd, htype, hname):
        """
        Print hint banner headers for a failing sample.

        NOTE: reads the loop variable ``y`` from the enclosing scope, so it
        is only valid while the extraction loop below is running.
        """
        print_header(htype, hname)
        print("Sample code ", str(y), " extracted for ", name, " :")
        print(sampcd)
        print("----example code check----\n")
        print("executing sample code .....")
        print("execution result:")

    sampcd_begins = find_all(srccom, " code-block:: python")
    if len(sampcd_begins) == 0:
        print_header(htype, hname)
        # No code-block found: either the docstring uses the deprecated
        # ">>>" style (rejected) or it has no sample code at all.
        if srccom.find("Examples:") != -1:
            print("----example code check----\n")
            if srccom.find(">>>") != -1:
                print(
                    "Deprecated sample code style:\n\n Examples:\n\n >>>codeline\n >>>codeline\n\n\n ",
                    "Please use '.. code-block:: python' to ",
                    "format sample code.\n")
                result = False
        else:
            print("Error: No sample code!\n")
            result = False
    for y in range(1, len(sampcd_begins) + 1):
        sampcd_begin = sampcd_begins[y - 1]
        sampcd = srccom[sampcd_begin + len(" code-block:: python") + 1:]
        sampcd = sampcd.split("\n")
        # remove starting empty lines
        while sampcd[0].replace(' ', '').replace('\t', '') == '':
            sampcd.pop(0)
        # the minimum indent, which is the indent of the first
        # non-empty line
        min_indent = check_indent(sampcd[0])
        sampcd_to_write = []
        for i in range(0, len(sampcd)):
            cdline = sampcd[i]
            # handle empty lines or those only with spaces/tabs
            if cdline.strip() == '':
                continue
            this_indent = check_indent(cdline)
            if this_indent < min_indent:
                # dedenting below the first line's indent ends the code block
                break
            else:
                cdline = cdline.replace('\t', ' ')
                sampcd_to_write.append(cdline[min_indent:])
        sampcd = '\n'.join(sampcd_to_write)
        # Select the device via environment, per the cpu/gpu CLI argument.
        if sys.argv[1] == "cpu":
            sampcd = '\nimport os\n' + 'os.environ["CUDA_VISIBLE_DEVICES"] = ""\n' + sampcd
        if sys.argv[1] == "gpu":
            sampcd = '\nimport os\n' + 'os.environ["CUDA_VISIBLE_DEVICES"] = "0"\n' + sampcd
        sampcd += '\nprint(' + '\"' + name + ' sample code is executed successfully!\")'
        if len(sampcd_begins) > 1:
            tfname = name + "_example_" + str(y) + ".py"
        else:
            tfname = name + "_example" + ".py"
        # FIX: open via a context manager so the handle is closed even if
        # write() raises (the original leaked the descriptor in that case).
        with open("samplecode_temp/" + tfname, 'w') as tempf:
            tempf.write(sampcd)
        # Pick the interpreter matching the version running this script.
        if platform.python_version()[0] == "2":
            cmd = ["python", "samplecode_temp/" + tfname]
        elif platform.python_version()[0] == "3":
            cmd = ["python3", "samplecode_temp/" + tfname]
        else:
            print("Error: fail to parse python version!")
            result = False
            # Unparsable interpreter version aborts the whole checker run.
            exit(1)
        subprc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = subprc.communicate()
        msg = "".join(output.decode(encoding='utf-8'))
        err = "".join(error.decode(encoding='utf-8'))
        if subprc.returncode != 0:
            print("\nSample code error found in ", name, ":\n")
            sampcd_header_print(name, sampcd, htype, hname)
            print("subprocess return code: ", str(subprc.returncode))
            print("Error Raised from Sample Code ", name, " :\n")
            print(err)
            print(msg)
            result = False
        # msg is the returned code execution report
        os.remove("samplecode_temp/" + tfname)
    return result
def single_defcom_extract(start_from, srcls, is_class_begin=False):
    """
    to extract a def function/class/method comments body

    Args:
        start_from(int): the line num of "def" header
        srcls(list): the source file in lines
        is_class_begin(bool): whether the start_from is a beginning a class. \
        For a sole class body itself may end up with its method if it has no
        docstring. But the body of \
        a common def function can only be ended up by a none-indented def/class

    Returns:
        string : the extracted comment body, inclusive of its quote marks.
    """
    i = start_from
    fcombody = ""  # def comment body
    comstart = -1  # the starting line index of comment mark "'''" or """"""
    # if it is not -1, it indicates the loop is in the comment body
    comstyle = 0  # comment mark style ,comments quoted with ''' is coded as 1
    # comments quoted with """ is coded as 2
    for x in range(i + 1, len(srcls)):
        if is_class_begin:
            # An indented "def" means the class body's own methods begin,
            # which terminates the class-level docstring scan.
            if srcls[x].replace('\t', ' ').startswith(' def '):
                break
        # Any new top-level def/class ends the search for this comment body.
        if srcls[x].startswith('def ') or srcls[x].startswith('class '):
            break
        else:
            # Opening """ mark (whitespace-insensitive match): start style 2.
            if (comstart == -1 and srcls[x].replace(" ", '').replace(
                    "\t", '').replace("\n", '').startswith("\"\"\"")):
                comstart = x
                comstyle = 2
                continue
            # Closing """ mark for a style-2 comment already in progress.
            if (comstyle == 2 and comstart != -1 and
                    srcls[x].replace(" ", '').replace("\t", '').replace(
                        "\n", '').startswith("\"\"\"")):
                break
            # Opening ''' mark: start style 1.
            if (comstart == -1 and srcls[x].replace(" ", '').replace(
                    "\t", '').replace("\n", '').startswith("\'\'\'")):
                comstart = x
                comstyle = 1
                continue
            # Closing ''' mark for a style-1 comment already in progress.
            if (comstyle == 1 and comstart != -1 and
                    srcls[x].replace(" ", '').replace("\t", '').replace(
                        "\n", '').startswith("\'\'\'")):
                break
            if (comstart !=
                    -1):  # when the comments start, begin to add line to fcombody
                fcombody += srcls[x]
    return fcombody
def print_header(htype, name):
    """Print a banner identifying the construct (def/class/method) under check."""
    print('{0}  name: {1}'.format(htype, name))
    print("-----------------------")
def srccoms_extract(srcfile, wlist):
"""
Given a source file ``srcfile``, this function will
extract its API(doc comments) and run sample codes in the
API.
Args:
srcfile(file): the source file
wlist(list): white list
Returns:
result: True or False
"""
process_result = True
srcc = srcfile.read()
# 2. get defs and classes header line number
# set file pointer to its beginning
srcfile.seek(0, 0)
srcls = srcfile.readlines() # source lines
# 1. fetch__all__ list
allidx = srcc.find("__all__")
if allidx != -1:
alllist = []
# get all list for layers/ops.py
if srcfile.name.find("ops.py") != -1:
for ai in range(0, len(srcls)):
if srcls[ai].startswith("__all__"):
lb = srcls[ai].find('[')
rb = srcls[ai].find(']')
if lb == -1:
continue
allele = srcls[ai][lb + 1:rb].replace("'", '').replace(
" ", '').replace("\"", '')
alllist.append(allele)
if '' in alllist:
alllist.remove('')
else:
alllist_b = allidx + len("__all__")
allstr = srcc[alllist_b + srcc[alllist_b:].find("[") + 1:alllist_b +
srcc[alllist_b:].find("]")]
allstr = allstr.replace("\n", '').replace(" ", '').replace(
"'", '').replace("\"", '')
alllist = allstr.split(',')
if '' in alllist:
alllist.remove('')
api_alllist_count = len(alllist)
api_count = 0
handled = []
# get src contents in layers/ops.py
if srcfile.name.find("ops.py") != -1:
for i in range(0, len(srcls)):
if srcls[i].find("__doc__") != -1:
opname = srcls[i][:srcls[i].find("__doc__") - 1]
if opname in wlist:
continue
comstart = i
for j in range(i, len(srcls)):
if srcls[j].find("\"\"\"") != -1:
comstart = i
opcom = ""
for j in range(comstart + 1, len(srcls)):
opcom += srcls[j]
if srcls[j].find("\"\"\"") != -1:
break
process_result = sampcd_extract_and_run(opcom, opname,
"def", opname)
api_count += 1
handled.append(
opname) # ops.py also has normal formatted functions
# use list 'handled' to mark the functions have been handled here
# which will be ignored | |
# tests/test_http.py
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import unittest
from gruvi import http
from gruvi.http import HttpServer, HttpClient, HttpMessage, HttpProtocol, ParsedUrl
from gruvi.http import parse_content_type, parse_te, parse_trailer, parse_url
from gruvi.http import get_header, remove_headers
from gruvi.stream import Stream, StreamClient
from gruvi.sync import Queue
from support import UnitTest, MockTransport
URL = ParsedUrl
class TestParseContentType(UnitTest):
    """Tests for http.parse_content_type()."""

    def test_simple(self):
        self.assertEqual(parse_content_type('text/plain'), ('text/plain', {}))

    def test_params(self):
        ctype, params = parse_content_type('text/plain; charset=foo')
        self.assertEqual(ctype, 'text/plain')
        self.assertEqual(params, {'charset': 'foo'})

    def test_iso8859_1(self):
        # Parameter values may carry latin-1 characters.
        self.assertEqual(parse_content_type('text/plain; foo="bar\xfe"'),
                         ('text/plain', {'foo': 'bar\xfe'}))

    def test_param_whitespace(self):
        self.assertEqual(parse_content_type('text/plain; charset=foo '),
                         ('text/plain', {'charset': 'foo'}))

    def test_param_quoted(self):
        self.assertEqual(parse_content_type('text/plain; charset="foo bar"'),
                         ('text/plain', {'charset': 'foo bar'}))

    def test_param_quoted_pair(self):
        self.assertEqual(parse_content_type('text/plain; charset="foo\\"bar"'),
                         ('text/plain', {'charset': 'foo"bar'}))

    def test_param_empty(self):
        self.assertEqual(parse_content_type('text/plain; charset=""'),
                         ('text/plain', {'charset': ''}))

    def test_param_multiple(self):
        self.assertEqual(parse_content_type('text/plain; foo=bar; baz=qux'),
                         ('text/plain', {'foo': 'bar', 'baz': 'qux'}))

    def test_param_multiple_missing_semi(self):
        # Without the ';' separator the second key=value pair is dropped.
        self.assertEqual(parse_content_type('text/plain; foo=bar baz=qux'),
                         ('text/plain', {'foo': 'bar'}))
class TestParseTE(UnitTest):
    """Tests for http.parse_te() (TE header parsing)."""

    def test_simple(self):
        self.assertEqual(parse_te('chunked'), [('chunked', None)])

    def test_multiple(self):
        self.assertEqual(parse_te('chunked, deflate'),
                         [('chunked', None), ('deflate', None)])

    def test_qvalue(self):
        self.assertEqual(parse_te('deflate; q=0.5'), [('deflate', '0.5')])

    def test_case_insensitive(self):
        # The coding's case is preserved; the "q" parameter name is not
        # case sensitive.
        self.assertEqual(parse_te('dEfLaTe; Q=0.5'), [('dEfLaTe', '0.5')])

    def test_illegal_qvalue(self):
        # A q-value outside [0, 1] is dropped rather than raising.
        self.assertEqual(parse_te('deflate; q=2.5'), [('deflate', None)])

    def test_multiple_qvalue(self):
        self.assertEqual(parse_te('deflate; q=0.5, zlib; q=0.8'),
                         [('deflate', '0.5'), ('zlib', '0.8')])
class TestParseTrailer(UnitTest):
    """Tests for http.parse_trailer() (Trailer header parsing)."""

    def test_simple(self):
        self.assertEqual(parse_trailer('foo'), ['foo'])

    def test_multiple(self):
        self.assertEqual(parse_trailer('foo, bar'), ['foo', 'bar'])

    def test_spacing(self):
        self.assertEqual(parse_trailer('foo , bar '), ['foo', 'bar'])

    def test_wrong_separator(self):
        # ';' is not a valid list separator, so parsing stops after 'foo'.
        self.assertEqual(parse_trailer('foo; bar'), ['foo'])
class TestParseUrl(UnitTest):
    """Tests for http.parse_url() and the ParsedUrl named tuple."""

    def test_test(self):
        # Each keyword argument maps to exactly one slot of the 7-tuple.
        cases = [
            ({}, ('', '', '', '', '', '', '')),
            ({'scheme': 'http'}, ('http', '', '', '', '', '', '')),
            ({'host': 'foo'}, ('', 'foo', '', '', '', '', '')),
            ({'path': '/path'}, ('', '', '/path', '', '', '', '')),
            ({'query': 'foo=bar'}, ('', '', '', 'foo=bar', '', '', '')),
            ({'fragment': 'baz'}, ('', '', '', '', 'baz', '', '')),
            ({'port': '80'}, ('', '', '', '', '', '80', '')),
            ({'userinfo': 'user:pass'}, ('', '', '', '', '', '', 'user:pass')),
        ]
        for kwargs, expected in cases:
            self.assertEqual(URL(**kwargs), expected)

    def test_origin(self):
        self.assertEqual(parse_url('/path'), URL(path='/path'))

    def test_absolute(self):
        self.assertEqual(parse_url('http://example.com/path'),
                         URL('http', 'example.com', '/path'))

    def test_authority(self):
        self.assertEqual(parse_url('example.com:80', is_connect=True),
                         URL('', 'example.com', port='80'))

    def test_authority_error(self):
        # CONNECT requests accept only the host:port (authority) form.
        for url in ('/path', 'http://example.com:80', '*'):
            self.assertRaises(ValueError, parse_url, url, is_connect=True)

    def test_asterisk(self):
        self.assertEqual(parse_url('*'), URL(path='*'))

    def test_userinfo(self):
        self.assertEqual(parse_url('http://user:pass@example.com'),
                         URL('http', 'example.com', userinfo='user:pass'))

    def test_port(self):
        self.assertEqual(parse_url('http://example.com:80'),
                         URL('http', 'example.com', port='80'))

    def test_userinfo_port(self):
        self.assertEqual(parse_url('http://user:pass@example.com:80'),
                         URL('http', 'example.com', port='80', userinfo='user:pass'))

    def test_default_scheme(self):
        # Scheme-less URLs fall back to default_scheme ('http' when unset).
        self.assertEqual(parse_url('www.example.com'),
                         URL('http', 'www.example.com'))
        self.assertEqual(parse_url('http://www.example.com'),
                         URL('http', 'www.example.com'))
        self.assertEqual(parse_url('www.example.com', default_scheme='https'),
                         URL('https', 'www.example.com'))
        self.assertEqual(parse_url('https://www.example.com', default_scheme='https'),
                         URL('https', 'www.example.com'))

    def test_addr(self):
        self.assertEqual(parse_url('www.example.com').addr,
                         ('www.example.com', 80))
        self.assertEqual(parse_url('https://www.example.com').addr,
                         ('www.example.com', 443))

    def test_ssl(self):
        self.assertFalse(parse_url('www.example.com').ssl)
        self.assertFalse(parse_url('http://www.example.com').ssl)
        self.assertTrue(parse_url('https://www.example.com').ssl)

    def test_target(self):
        # .target is the origin-form request target: path plus optional query.
        self.assertEqual(parse_url('www.example.com').target, '/')
        self.assertEqual(parse_url('www.example.com/foo').target, '/foo')
        self.assertEqual(parse_url('www.example.com?bar').target, '/?bar')
        self.assertEqual(parse_url('www.example.com/foo?bar').target, '/foo?bar')
class TestGetHeader(UnitTest):
    """Tests for http.get_header()."""

    headers = [('foo', 'fooval'),
               ('bar', 'barval'),
               ('baz', 'bazval')]

    def test_simple(self):
        for name, value in self.headers:
            self.assertEqual(get_header(self.headers, name), value)

    def test_case_insensitive(self):
        # Header names match regardless of case.
        for variant in ('Foo', 'FOO'):
            self.assertEqual(get_header(self.headers, variant), 'fooval')

    def test_not_present(self):
        self.assertIsNone(get_header(self.headers, 'qux'))

    def test_default_value(self):
        self.assertEqual(get_header(self.headers, 'qux', 'quxval'), 'quxval')
class TestRemoveHeaders(UnitTest):
    """Tests for http.remove_headers()."""

    headers = [('foo', 'fooval1'),
               ('bar', 'barval1'),
               ('foo', 'fooval2'),
               ('baz', 'bazval'),
               ('bar', 'barval2')]

    def test_simple(self):
        # Every occurrence of the name is removed; remaining order is kept.
        self.assertEqual(remove_headers(self.headers[:], 'foo'),
                         [('bar', 'barval1'), ('baz', 'bazval'), ('bar', 'barval2')])
        self.assertEqual(remove_headers(self.headers[:], 'bar'),
                         [('foo', 'fooval1'), ('foo', 'fooval2'), ('baz', 'bazval')])

    def test_in_place(self):
        # The input list is mutated and returned, not copied.
        copy = self.headers[:]
        self.assertIs(remove_headers(copy, 'foo'), copy)

    def test_non_quadratic(self):
        # Ensure remove_headers() doesn't take quadratic time.
        names = ('foo', 'bar', 'baz', 'qux')
        headers = [(names[i % 4], names[i % 4] + 'val') for i in range(100000)]
        self.assertEqual(len(remove_headers(headers, 'foo')), 75000)
class TestHttpProtocol(UnitTest):
def setUp(self):
super(TestHttpProtocol, self).setUp()
self.requests = Queue()
def store_request(self, message, transport, protocol):
self.requests.put(message)
protocol.writer.write(b'HTTP/1.1 200 OK\r\n\r\n')
def parse_request(self, *chunks):
# Parse the HTTP request made up of *chunks.
transport = MockTransport()
protocol = HttpProtocol(self.store_request, server_side=True)
transport.start(protocol)
for chunk in chunks:
protocol.data_received(chunk)
self.assertIsNone(protocol._error)
self.transport = transport
self.protocol = protocol
def get_request(self):
# Get a parsed request.
m = self.requests.get(timeout=1.0)
self.assertIsInstance(m, HttpMessage)
self.assertEqual(m.message_type, http.REQUEST)
return m
# Tests that parse a request
def test_simple_request(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.version, '1.1')
self.assertIsNone(m.status_code)
self.assertEqual(m.method, 'GET')
self.assertEqual(m.url, '/')
self.assertTrue(m._should_keep_alive)
self.assertEqual(m.parsed_url, URL(path='/'))
self.assertEqual(m.headers, [('Host', 'example.com')])
self.assertIsInstance(m.body, Stream)
self.assertTrue(m.body.buffer.eof)
def test_request_with_body(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Content-Length: 3\r\n\r\nFoo'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.url, '/')
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example.com'), ('Content-Length', '3')])
self.assertFalse(m.body.buffer.eof)
self.assertEqual(m.body.read(), b'Foo')
self.assertTrue(m.body.buffer.eof)
def test_request_with_body_incremental(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Content-Length: 3\r\n\r\nFoo'
self.parse_request(*[r[i:i+1] for i in range(len(r))])
m = self.get_request()
self.assertEqual(m.url, '/')
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example.com'), ('Content-Length', '3')])
self.assertFalse(m.body.buffer.eof)
self.assertEqual(m.body.read(), b'Foo')
self.assertTrue(m.body.buffer.eof)
def test_request_with_chunked_body(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Transfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.url, '/')
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example.com'),
('Transfer-Encoding', 'chunked')])
self.assertFalse(m.body.buffer.eof)
self.assertEqual(m.body.read(), b'Foo')
self.assertTrue(m.body.buffer.eof)
def test_request_with_chunked_body_incremental(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Transfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\n\r\n'
self.parse_request(*[r[i:i+1] for i in range(len(r))])
m = self.get_request()
self.assertEqual(m.url, '/')
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example.com'),
('Transfer-Encoding', 'chunked')])
self.assertFalse(m.body.buffer.eof)
self.assertEqual(m.body.read(), b'Foo')
self.assertTrue(m.body.buffer.eof)
def test_request_with_chunked_body_and_trailers(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Transfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\nETag: foo\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.url, '/')
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example.com'),
('Transfer-Encoding', 'chunked'),
('ETag', 'foo')])
self.assertFalse(m.body.buffer.eof)
self.assertEqual(m.body.read(), b'Foo')
self.assertTrue(m.body.buffer.eof)
def test_pipelined_requests(self):
r = b'GET /0 HTTP/1.1\r\nHost: example0.com\r\n\r\n' \
b'GET /1 HTTP/1.1\r\nHost: example1.com\r\n\r\n'
self.parse_request(r)
for i in range(2):
m = self.get_request()
self.assertEqual(m.url, '/{0}'.format(i))
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example{}.com'.format(i))])
self.assertTrue(m.body.buffer.eof)
def test_pipelined_requests_with_body(self):
r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Content-Length: 4\r\n\r\nFoo0' \
b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
b'Content-Length: 4\r\n\r\nFoo1'
self.parse_request(r)
for i in range(2):
m = self.get_request()
self.assertEqual(m.url, '/')
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Host', 'example.com'), ('Content-Length', '4')])
self.assertFalse(m.body.buffer.eof)
self.assertEqual(m.body.read(), 'Foo{}'.format(i).encode('ascii'))
self.assertTrue(m.body.buffer.eof)
def test_request_url(self):
r = b'GET /foo/bar HTTP/1.1\r\n' \
b'Host: example.com\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.parsed_url, URL(path='/foo/bar'))
def test_long_request_url(self):
r = b'GET http://user:pass@example.com:80/foo/bar?baz=qux#quux HTTP/1.1\r\n' \
b'Host: example.com\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.parsed_url, URL('http', 'example.com', '/foo/bar', 'baz=qux', 'quux',
port='80', userinfo='user:pass'))
# Tests that parse a response
def parse_response(self, *chunks, **kwargs):
# Parse the HTTP resposne made up of *chunks.
transport = MockTransport()
protocol = HttpProtocol()
transport.start(protocol)
methods = kwargs.get('methods', [])
if methods:
protocol._requests = methods
for chunk in chunks:
protocol.data_received(chunk)
self.assertIsNone(protocol._error)
self.transport = transport
self.protocol = protocol
def get_response(self):
# Get a parsed resposne.
m = self.protocol.getresponse()
self.assertIsInstance(m, HttpMessage)
self.assertEqual(m.message_type, http.RESPONSE)
return m
def test_simple_response(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '0')])
self.assertEqual(m.get_header('Content-Length'), '0')
self.assertEqual(m.body.read(), b'')
self.assertTrue(m.body.buffer.eof)
def test_response_with_body(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nFoo'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '3')])
self.assertEqual(m.get_header('Content-Length'), '3')
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_body_incremental(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nFoo'
self.parse_response([r[i:i+1] for i in range(len(r))])
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '3')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_chunked_body(self):
r = b'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\n\r\n'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Transfer-Encoding', 'chunked')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_chunked_body_incremental(self):
r = b'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\n\r\n'
self.parse_response([r[i:i+1] for i in range(len(r))])
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Transfer-Encoding', 'chunked')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_chunked_body_and_trailers(self):
    """Trailers after the final chunk are appended to the message headers."""
    raw = b'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n' \
          b'3\r\nFoo\r\n0\r\nETag: foo\r\n\r\n'
    self.parse_response(raw)
    msg = self.get_response()
    self.assertEqual(msg.version, '1.1')
    self.assertEqual(msg.status_code, 200)
    self.assertEqual(msg.headers, [('Transfer-Encoding', 'chunked'), ('ETag', 'foo')])
    self.assertEqual(msg.body.read(), b'Foo')
    self.assertEqual(msg.body.read(), b'')
def test_pipelined_responses(self):
    """Two back-to-back responses on one connection come out separately, in order."""
    raw = b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\nSet-Cookie: foo=0\r\n\r\n' \
          b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\nSet-Cookie: foo=1\r\n\r\n'
    self.parse_response(raw)
    for idx in range(2):
        msg = self.get_response()
        self.assertEqual(msg.version, '1.1')
        self.assertEqual(msg.status_code, 200)
        expected_cookie = 'foo={0}'.format(idx)
        self.assertEqual(msg.headers, [('Content-Length', '0'), ('Set-Cookie', expected_cookie)])
        self.assertEqual(msg.body.read(), b'')
def test_pipelined_responses_with_body(self):
    """Pipelined responses with bodies must not bleed bytes into each other."""
    raw = b'HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nFoo0' \
          b'HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nFoo1'
    self.parse_response(raw)
    for idx in range(2):
        msg = self.get_response()
        self.assertEqual(msg.version, '1.1')
        self.assertEqual(msg.status_code, 200)
        self.assertEqual(msg.headers, [('Content-Length', '4')])
        self.assertEqual(msg.body.read(), 'Foo{0}'.format(idx).encode('ascii'))
        self.assertEqual(msg.body.read(), b'')
def test_pipelined_head_responses(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\n' \
b'HTTP/1.1 200 OK\r\nContent-Length: | |
with ``clipa_movement=SLIDE`` and ``clipb_movement=SLIDE``.
"""
return linear_boundary(clipa, clipb, MiscConstants.SLIDE, MiscConstants.SLIDE, frames=frames, direction=direction)
def slide_expand(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, direction: Direction = Direction.LEFT
) -> vs.VideoNode:
    """First clip slides out of view, while second clip expands into view from nothing.

    `clipa` slides off of the screen towards `direction`.
    `clipb` expands into view from the opposite side of the given direction.

    Alias for :func:`linear_boundary` with ``clipa_movement=SLIDE`` and ``clipb_movement=EXPAND``.
    """
    return linear_boundary(clipa, clipb, MiscConstants.SLIDE, MiscConstants.EXPAND, frames=frames, direction=direction)
def squeeze_slide(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, direction: Direction = Direction.LEFT
) -> vs.VideoNode:
    """First clip squeezes into nothing, while second clip slides into view.

    `clipa` gets compressed off of the screen towards `direction`.
    `clipb` slides into view from the opposite side of the given direction.

    Alias for :func:`linear_boundary` with ``clipa_movement=SQUEEZE`` and ``clipb_movement=SLIDE``.
    """
    return linear_boundary(clipa, clipb, MiscConstants.SQUEEZE, MiscConstants.SLIDE, frames=frames, direction=direction)
def squeeze_expand(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, direction: Direction = Direction.LEFT
) -> vs.VideoNode:
    """First clip squeezes into nothing, while second clip expands into view from nothing.

    `clipa` gets compressed off of the screen towards `direction`.
    `clipb` expands into view from the opposite side of the given direction.

    Alias for :func:`linear_boundary` with ``clipa_movement=SQUEEZE`` and ``clipb_movement=EXPAND``.
    """
    return linear_boundary(
        clipa, clipb, MiscConstants.SQUEEZE, MiscConstants.EXPAND, frames=frames, direction=direction
    )
def cover(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, direction: Direction = Direction.LEFT
) -> vs.VideoNode:
    """Second clip slides in and covers the first clip which stays in place.

    `clipb` slides into frame towards `direction` covering `clipa`.

    :param clipa:     stationary clip being covered.
    :param clipb:     clip sliding in over it.
    :param frames:    transition length; defaults to the shorter clip's frame count.
    :param direction: screen edge `clipb` enters from.
    """
    if direction not in [Direction.LEFT, Direction.RIGHT, Direction.UP, Direction.DOWN]:
        raise ValueError("cover: give a proper direction")
    # Default: the transition lasts as long as the shorter of the two clips.
    frames_ = frames or min(clipa.num_frames, clipb.num_frames)
    if TYPE_CHECKING:
        assert isinstance(frames_, int)
    _check_clips(frames_, cover, clipa, clipb)
    clipa_clean, clipb_clean, clipa_t_zone, clipb_t_zone = _transition_clips(clipa, clipb, frames_)

    def _cover(n: int) -> vs.VideoNode:
        # Linear progress through the transition zone: 0 on the first frame,
        # exactly 1 on the last (hence the `frames_ - 1` divisor).
        progress = Fraction(n, frames_ - 1)
        # Pixels of clipa already covered along each axis at this frame.
        w = math.floor(progress * clipa.width)
        h = math.floor(progress * clipa.height)
        if progress == 0:
            return clipa_t_zone
        elif progress == 1:
            return clipb_t_zone
        # flooring can leave the offset at 0 even though progress > 0;
        # Crop(0) would be a no-op stack of full-width clips, so short-circuit.
        if direction in [Direction.LEFT, Direction.RIGHT]:
            if w == 0:
                return clipa_t_zone
        elif direction in [Direction.UP, Direction.DOWN]:
            if h == 0:
                return clipa_t_zone
        # Crop the covered strip off clipa, butt clipb against it, then resize
        # back onto the original canvas; src_left/src_top keep the composite
        # anchored so clipb appears to slide in from the chosen edge.
        if direction == Direction.LEFT:
            cropped_a = clipa_t_zone.std.Crop(right=w)
            stack = core.std.StackHorizontal([cropped_a, clipb_t_zone])
            return stack.resize.Spline36(width=clipa.width, src_width=clipa.width)
        elif direction == Direction.RIGHT:
            cropped_a = clipa_t_zone.std.Crop(left=w)
            stack = core.std.StackHorizontal([clipb_t_zone, cropped_a])
            return stack.resize.Spline36(width=clipa.width, src_left=clipa.width - w, src_width=clipa.width)
        elif direction == Direction.UP:
            cropped_a = clipa_t_zone.std.Crop(bottom=h)
            stack = core.std.StackVertical([cropped_a, clipb_t_zone])
            return stack.resize.Spline36(height=clipa.height, src_height=clipa.height)
        elif direction == Direction.DOWN:
            cropped_a = clipa_t_zone.std.Crop(top=h)
            stack = core.std.StackVertical([clipb_t_zone, cropped_a])
            return stack.resize.Spline36(height=clipa.height, src_top=clipa.height - h, src_height=clipa.height)

    # FrameEval re-evaluates _cover per frame over a blank clip of the
    # transition's length; the clean head/tail are re-attached afterwards.
    covered = core.std.FrameEval(core.std.BlankClip(clipa, length=frames_), _cover)
    return _return_combo(clipa_clean, covered, clipb_clean)
def reveal(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, direction: Direction = Direction.LEFT
) -> vs.VideoNode:
    """First clip slides out of view exposing second clip that stays in place.

    `clipa` slides out of frame towards `direction` revealing `clipb`.

    :param clipa:     clip sliding away.
    :param clipb:     stationary clip being revealed.
    :param frames:    transition length; defaults to the shorter clip's frame count.
    :param direction: screen edge `clipa` exits through.
    """
    if direction not in [Direction.LEFT, Direction.RIGHT, Direction.UP, Direction.DOWN]:
        raise ValueError("reveal: give a proper direction")
    frames_ = frames or min(clipa.num_frames, clipb.num_frames)
    if TYPE_CHECKING:
        assert isinstance(frames_, int)
    _check_clips(frames_, reveal, clipa, clipb)
    clipa_clean, clipb_clean, clipa_t_zone, clipb_t_zone = _transition_clips(clipa, clipb, frames_)

    def _reveal(n: int) -> vs.VideoNode:
        # Inverted progress: 1 at the first frame (clipa fully visible),
        # 0 at the last (clipa fully gone).  w/h is clipa's remaining extent.
        progress = 1 - Fraction(n, frames_ - 1)
        w = math.floor(progress * clipa.width)
        h = math.floor(progress * clipa.height)
        if progress == 1:
            return clipa_t_zone
        elif progress == 0:
            return clipb_t_zone
        # flooring may drive the remaining extent to 0 early; Crop(0) would
        # misbehave, so return the fully revealed clip directly.
        if direction in [Direction.LEFT, Direction.RIGHT]:
            if w == 0:
                return clipb_t_zone
        elif direction in [Direction.UP, Direction.DOWN]:
            if h == 0:
                return clipb_t_zone
        # Pair the shrinking clipa with the already-exposed strip of clipb and
        # resize onto the full canvas; src_left/src_top keep clipb pinned in
        # place while clipa appears to slide off.
        if direction == Direction.LEFT:
            cropped_b = clipb_t_zone.std.Crop(left=w)
            stack = core.std.StackHorizontal([clipa_t_zone, cropped_b])
            return stack.resize.Spline36(width=clipa.width, src_width=clipa.width, src_left=clipa.width - w)
        elif direction == Direction.RIGHT:
            cropped_b = clipb_t_zone.std.Crop(right=w)
            stack = core.std.StackHorizontal([cropped_b, clipa_t_zone])
            return stack.resize.Spline36(width=clipa.width, src_width=clipa.width)
        elif direction == Direction.UP:
            cropped_b = clipb_t_zone.std.Crop(top=h)
            stack = core.std.StackVertical([clipa_t_zone, cropped_b])
            return stack.resize.Spline36(height=clipa.height, src_height=clipa.height, src_top=clipa.height - h)
        elif direction == Direction.DOWN:
            cropped_b = clipb_t_zone.std.Crop(bottom=h)
            stack = core.std.StackVertical([cropped_b, clipa_t_zone])
            return stack.resize.Spline36(height=clipa.height, src_height=clipa.height)

    # NOTE(review): the name `covered` looks copy-pasted from cover(); it
    # holds the *revealed* composite here.  Behaviour is unaffected.
    covered = core.std.FrameEval(core.std.BlankClip(clipa, length=frames_), _reveal)
    return _return_combo(clipa_clean, covered, clipb_clean)
def curtain_cover(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, axis: Direction = Direction.HORIZONTAL
) -> vs.VideoNode:
    """Second clip comes into view from both directions split along the given axis covering the first clip in place.

    `clipb` splits and moves inwards along the given `axis`.

    If `axis` is given as :attr:`Direction.HORIZONTAL`, the clips must have an even integer width.
    If `axis` is given as :attr:`Direction.VERTICAL`, the clips must have an even integer height.
    """
    if axis not in [Direction.HORIZONTAL, Direction.VERTICAL]:
        raise ValueError("curtain_cover: give a proper axis")
    # The clip is split into two equal halves, so the split dimension must be even.
    if axis == Direction.HORIZONTAL and clipa.width % 2:
        raise ValueError("curtain_cover: for horizontal reveal, input clips must have an even width")
    elif axis == Direction.VERTICAL and clipa.height % 2:
        raise ValueError("curtain_cover: for vertical reveal, input clips must have an even height")
    frames_ = frames or min(clipa.num_frames, clipb.num_frames)
    if TYPE_CHECKING:
        assert isinstance(frames_, int)
    _check_clips(frames_, curtain_cover, clipa, clipb)
    clipa_clean, clipb_clean, clipa_t_zone, clipb_t_zone = _transition_clips(clipa, clipb, frames_)

    def _curtain_cover(n: int) -> vs.VideoNode:
        # Linear progress: 0 on the first transition frame, 1 on the last.
        progress = Fraction(n, frames_ - 1)
        if progress == 0:
            return clipa_t_zone
        elif progress == 1:
            return clipb_t_zone
        if axis == Direction.HORIZONTAL:
            # Total covered width, rounded to an even number so the two
            # curtain halves stay perfectly symmetric.
            w = round(float(clipa.width * progress / 2)) * 2
            if w == 0:
                return clipa_t_zone
            elif w == clipa.width:
                return clipb_t_zone
            # Take the inner w/2 pixels from each half of clipb and flank the
            # still-visible centre strip of clipa with them.
            clipb_left = clipb_t_zone.std.Crop(right=clipa.width // 2)
            clipb_right = clipb_t_zone.std.Crop(left=clipa.width // 2)
            clipb_left = clipb_left.std.Crop(left=clipb_left.width - w // 2)
            clipb_right = clipb_right.std.Crop(right=clipb_right.width - w // 2)
            clipa_cropped = clipa_t_zone.std.Crop(left=clipb_left.width, right=clipb_right.width)
            return core.std.StackHorizontal([clipb_left, clipa_cropped, clipb_right])
        elif axis == Direction.VERTICAL:
            # Same as above, but the curtains close from top and bottom.
            h = round(float(clipa.height * progress / 2)) * 2
            if h == 0:
                return clipa_t_zone
            elif h == clipa.height:
                return clipb_t_zone
            clipb_top = clipb_t_zone.std.Crop(bottom=clipa.height // 2)
            clipb_bottom = clipb_t_zone.std.Crop(top=clipa.height // 2)
            clipb_top = clipb_top.std.Crop(top=clipb_top.height - h // 2)
            clipb_bottom = clipb_bottom.std.Crop(bottom=clipb_bottom.height - h // 2)
            clipa_cropped = clipa_t_zone.std.Crop(top=clipb_top.height, bottom=clipb_bottom.height)
            return core.std.StackVertical([clipb_top, clipa_cropped, clipb_bottom])

    curtain_covered = core.std.FrameEval(core.std.BlankClip(clipa, length=frames_), _curtain_cover)
    return _return_combo(clipa_clean, curtain_covered, clipb_clean)
def curtain_reveal(
    clipa: vs.VideoNode, clipb: vs.VideoNode, frames: Optional[int] = None, axis: Direction = Direction.HORIZONTAL
) -> vs.VideoNode:
    """First clip splits apart to reveal the second clip in place.

    `clipa` splits and moves apart along the given `axis`.

    If `axis` is given as :attr:`Direction.HORIZONTAL`, the clips must have an even integer width.
    If `axis` is given as :attr:`Direction.VERTICAL`, the clips must have an even integer height.
    """
    if axis not in [Direction.HORIZONTAL, Direction.VERTICAL]:
        raise ValueError("curtain_reveal: give a proper axis")
    # The clip is split into two equal halves, so the split dimension must be even.
    if axis == Direction.HORIZONTAL and clipa.width % 2:
        raise ValueError("curtain_reveal: for horizontal reveal, input clips must have an even width")
    elif axis == Direction.VERTICAL and clipa.height % 2:
        raise ValueError("curtain_reveal: for vertical reveal, input clips must have an even height")
    frames_ = frames or min(clipa.num_frames, clipb.num_frames)
    if TYPE_CHECKING:
        assert isinstance(frames_, int)
    _check_clips(frames_, curtain_reveal, clipa, clipb)
    clipa_clean, clipb_clean, clipa_t_zone, clipb_t_zone = _transition_clips(clipa, clipb, frames_)

    def _curtain_reveal(n: int) -> vs.VideoNode:
        # Linear progress: 0 on the first transition frame, 1 on the last.
        progress = Fraction(n, frames_ - 1)
        if progress == 0:
            return clipa_t_zone
        elif progress == 1:
            return clipb_t_zone
        if axis == Direction.HORIZONTAL:
            # Total revealed width, rounded to an even number so both curtain
            # halves retreat symmetrically.
            w = round(float(clipa.width * progress / 2)) * 2
            if w == 0:
                return clipa_t_zone
            elif w == clipa.width:
                return clipb_t_zone
            # Trim w/2 pixels off the inner edge of each clipa half and show
            # the matching centre strip of clipb between them.
            clipa_left = clipa_t_zone.std.Crop(right=clipa.width // 2)
            clipa_right = clipa_t_zone.std.Crop(left=clipa.width // 2)
            clipa_left = clipa_left.std.Crop(left=w // 2)
            clipa_right = clipa_right.std.Crop(right=w // 2)
            clipb_cropped = clipb_t_zone.std.Crop(left=clipa_left.width, right=clipa_right.width)
            return core.std.StackHorizontal([clipa_left, clipb_cropped, clipa_right])
        elif axis == Direction.VERTICAL:
            # Same as above, but the curtains open towards top and bottom.
            h = round(float(clipa.height * progress / 2)) * 2
            if h == 0:
                return clipa_t_zone
            elif h == clipa.height:
                return clipb_t_zone
            clipa_top = clipa_t_zone.std.Crop(bottom=clipa.height // 2)
            clipa_bottom = clipa_t_zone.std.Crop(top=clipa.height // 2)
            clipa_top = clipa_top.std.Crop(top=h // 2)
            clipa_bottom = clipa_bottom.std.Crop(bottom=h // 2)
            clipb_cropped = clipb_t_zone.std.Crop(top=clipa_top.height, bottom=clipa_bottom.height)
            return core.std.StackVertical([clipa_top, clipb_cropped, clipa_bottom])

    curtain_revealed = core.std.FrameEval(core.std.BlankClip(clipa, length=frames_), _curtain_reveal)
    return _return_combo(clipa_clean, curtain_revealed, clipb_clean)
def pixellate(
clipa: vs.VideoNode,
clipb: vs.VideoNode,
frames: Optional[int] = None,
lowest_target_w: Optional[int] = 2,
lowest_target_h: Optional[int] = 2,
) -> vs.VideoNode:
"""Pixellate using rescales and aggressively fade at the center.
For large clips (width `x` height), the effect might not be too noticeable
until the transition is near the middle point.
This is due to bicubic downscales and point re-upscales
at very high percentages of the original dimensions
not being noticeably different.
Due to the way pixellation progress is calculated,
the transition `must` be at least 4 frames long.
Longer transitions paired with larger target dimensions
will cause the pixellation effect to appear to pause towards the center of the transition.
| |
guess."
"\n Out of your periphery you notice a large being approaching."
" You turn suddenly, reaching for your trusty {}"
", but you remember it's not there. All your things, gone."
"\n A large Orcish man approaches you, easily twice your height."
" 'I am the gate keeper here. Why do you wish to leave?'"
". Clearly not looking to fight, he lowers his weapon, a large golden axe."
" 'The road is not safe. The town's only just been built back up."
" We are still clearing the area around the town.' ".format(player_old_wep))
em1 = await self.embed_builder(ctx, current_player, header, option)
await self.bot.say("", embed=em1)
response = await self.loop_checks(ctx)
if not response:
valid = True
return
if '1' in response.content:
await self.bot.say("Please repeat your option.")
valid = False
continue
elif '2' in response.content:
await self.first_adventure_outside(ctx, current_player, player_town)
valid = True
break
elif '2' in response.content:
# Look at the fountain
option = ["Try for the gate. It's time to get out of here.",
"Stay staring at the fountain.",
"Take a closer look at the buildings around you."]
header[0] = ("The Old Fountain")
header[1] = (" It seems no-one has gotten around to sorting this out."
" It has a dirty, worn out plaque underneath it, barely readable."
"\n Still covered in Algae, the base is cracked."
" There are some old coins, some still glittering, most are faded."
"\n The fountain itself is a very plain structure,"
" with three bowl-shaped tiers.")
em1 = await self.embed_builder(ctx, current_player, header, option)
await self.bot.say("", embed=em1)
#TODO interactable that allows you to dive into the fountain for some extra cash.
# naaaah. to lazy
continue
elif '3' in response.content:
# Look around at the buildings.
town_name = player_town['Town_Name']
town_buildings = ", ".join(player_town['Buildings'])
building_info_string = ""
building_info = dataIO.load_json(
"data/discordrpg/buildings.json")
for building in player_town['Buildings']:
info = building_info[building]
building_info_string += "**{}**: *{}*\n".format(building, info['Desc'])
option = ["Try for the gate. It's time to get out of here.",
"Approach the Fountain."]
header[0] = ("{}, the town.".format(town_name))
header[1] = ("Doing a quick roundabout, you notice the town has a {}."
". Nothing all too impressive, but a decent start,"
" you think to yourself."
"\n\n{}".format(town_buildings, building_info_string))
em1 = await self.embed_builder(ctx, current_player, header, option)
await self.bot.say("", embed=em1)
continue
else:
await self.bot.say("No correct response detected. Please try again.")
continue
async def first_adventure_outside(self, ctx, current_player, player_town):
    """Narrate the player's first step outside town, one tile north of it.

    Moves the player to the tile at (town X, town Y + 1), persists the new
    location, and presents the default tile options.

    NOTE(review): gameplay past the initial survey option is unfinished
    (see the TODOs below); only option '1' is handled in the loop.
    """
    #TODO continue gameplay here
    # get location of town. Add 1 to loc_y.
    # get tile details for that location.
    # from details, return a string of the tile.
    # options? default tile interaction options?
    user = ctx.message.author
    player_race = current_player['Race']
    # One tile "outside" the gate.  This mutates the town's own location
    # dict in place — presumably intentional, but worth verifying.
    location = player_town['Location']
    location['Y'] += 1
    tile = await self.map.get_tile_records(user, location)
    current_player = await self.player.update_location(user, location)
    option = self.default_options
    header = ["Title","Description"]
    header[0] = "The Green Mile"
    header[1] = ("The Gate Keeper reluctantly opens the gate."
                 " He warns you one last time, you should wait,"
                 " but you're a {}, and waiting isn't in your blood."
                 " There is no stone beneath your feet, just cold grass."
                 " You realise you must be {}.".format(player_race, "in a grassland"))
    em1 = await self.embed_builder(ctx, current_player, header, option)
    await self.bot.say("", embed=em1)
    valid = False
    while not valid:
        # loop_checks returns False on logout/timeout, else the message.
        response = await self.loop_checks(ctx)
        if not response:
            valid = True
            return
        elif '1' in response.content:
            surrounds = await self.map.get_surrounds(user, location)
            #TODO Now CUT down. All tiles around are too much detail. Provide a basic for each.
            await self.bot.say(surrounds["North"])
async def survey_landscape(self, ctx, user, current_player, option, location):
    """Build an embed describing the tiles surrounding `location`.

    NOTE(review): this is an unfinished stub — `header` and `option` are
    left empty, so embed_builder's `header[0]` access will raise
    IndexError if this is ever called.  TODO: populate from `surrounds`.
    """
    surrounds = await self.map.get_surrounds(user, location)
    header = [] #two value, tile and desc.
    option = [] #option list to go with it.
    em1 = await self.embed_builder(ctx, current_player, header, option)
    # stuff to create the pretty embed.
    return em1
async def reload_town_records(self):
    """Re-create the Town helper, re-reading towns.json from disk."""
    self.town = Town(self.bot, self.player, "data/discordrpg/towns.json")
async def exit_check(self, text):
    """Return True when `text` contains 'exit' (case-insensitive), else False."""
    return 'exit' in text.lower()
def _login(self, user):
if user.id in self.logged_in_users:
return False
else:
self.logged_in_users.append(user.id)
print(self.logged_in_users)
return True
# TODO return as an embed,
# a login message as well as details last played
# and current location.
async def _logout(self, ctx):
    """Log the invoking user out of the RPG session.

    Removes the author's id from the logged-in list and confirms; tells
    the user how to log in if they had no session.
    """
    user = ctx.message.author
    try:
        self.logged_in_users.remove(user.id)
        print(self.logged_in_users)
        await self.bot.say("Logged out succesfully..")
    except ValueError:
        # list.remove raises ValueError when the id is absent, i.e. the
        # user was never logged in.  (Was a bare `except:`, which also
        # swallowed cancellation and bot errors.)
        await self.bot.say("It seems you are not currently logged in. "
                           "Have you tried `{}rpg play`?".format(ctx.prefix))
async def loop_checks(self, ctx):
    """Wait for the logged-in author's next message, handling timeout and 'exit'.

    Returns the message object to continue the session, or False when the
    session should end: the user is not logged in, 10 minutes pass with no
    reply, or the user says 'exit' and confirms.
    """
    user = ctx.message.author
    if user.id not in self.logged_in_users:
        return False
    # 600 s = 10 minutes of inactivity before auto-logout.
    response = await self.bot.wait_for_message(timeout=600, author=user)
    if response is None:
        await self._logout(ctx)
        return False
    if await self.exit_check(response.content):
        await self.bot.say(("Are you sure you wish to end the session here?"
                            " Say `yes` to confirm"))
        response = await self.bot.wait_for_message(timeout=600, author=user)
        if response is None:
            await self._logout(ctx)
            return False
        elif 'y' in response.content.lower():
            await self._logout(ctx)
            return False
    # Not an exit (or exit not confirmed): hand the message to the caller.
    return response
async def embed_builder(self, ctx, user, header, option):
    """Build the standard choice embed: header title/description plus a numbered option list."""
    numbered = []
    for idx, choice in enumerate(option, start=1):
        numbered.append("`{}.`{}\n".format(idx, choice))
    value = "".join(numbered)
    em = discord.Embed(title="{}".format(header[0]),
                       description="{}".format(header[1]),
                       colour=0xfd0000)
    em.add_field(name='Use the numbers to Indicate your Choice.',
                 value=value, inline=False)
    em.set_author(name='{}'.format(user["CharName"]))
    em.set_thumbnail(url=user['Avatar'])
    em.set_footer(text="Say Exit to logout from this session.")
    return em
async def generic_adventure(self, ctx, current_player, player_town):
    """Stub for a generic adventure encounter.

    NOTE(review): `option` is created empty but indexed immediately below,
    so this raises IndexError as soon as it is called — it looks like
    unfinished scaffolding.  TODO: define the six option entries.
    """
    option = []
    em1 = discord.Embed(title="{}".format(
        option[0]), description="{}".format(option[1]), colour=0xfd0000)
    em1.add_field(name='Use the numbers to Indicate your Choice.', value="`1.`{}\n`2.`{}\n`3.`{}\n`4.`{}\n".format(
        option[2], option[3], option[4], option[5]), inline=False)
    # Bug fix: the original passed `embed=embed1`, a NameError — the local
    # embed variable is `em1`.
    await self.bot.say("", embed=em1)
class Player:
def __init__(self, bot, player_path, invent_path):
    """Load the player roster/inventories and wire up the game-world helpers.

    :param bot: discord bot instance shared with the helper objects.
    :param player_path: JSON file holding all player records.
    :param invent_path: JSON file holding all player inventories.
    """
    self.bot = bot
    self.playerRoster = dataIO.load_json(player_path)
    self.playerInventories = dataIO.load_json(invent_path)
    # Town and Map take this Player instance so they can resolve player
    # records themselves.
    self.monster = Monster(bot, "data/discordrpg/monsters.json")
    self.town = Town(bot, self, "data/discordrpg/towns.json")
    self.map = Map(self, bot, "data/discordrpg/tiletypes.json",
                   "data/discordrpg/map.json")
async def check_player(self, userID):
try:
if userID in self.playerRoster:
return True
else:
return False
except:
return False
async def check_inventory(self, userID):
try:
if userID in self.playerInventories:
return True
else:
return False
except:
return False
async def get_player_records(self, userID):
if await self.check_player(userID):
return deepcopy(self.playerRoster[userID])
else:
return None
async def get_player_invent(self, userID):
if self.check_player(userID):
return deepcopy(self.playerInventories[userID])
else:
return False
async def _createplayer(self, ctx):
# order = charname, race, hometown, bio. stats to be inferred from
# race.
author = ctx.message.author
newplayer = {}
race = ""
charname = ""
bio = ""
hometownid = ctx.message.server.id
hometownname = ctx.message.server.name
await self.town.reload_town_records()
town_record = await self.town.get_town_records(hometownid)
print("New player is registering in {} from server {}".format(
town_record['Town_Name'], ctx.message.server.name))
completion = "yes"
embed = discord.Embed(
title="Pick a Class", description="Let's start off by finding what class your character is.", colour=0xff0000)
embed.add_field(
name='Class', value="Choose from the following Classes:", inline=False)
embed.add_field(name='Warrior', value='The Melee Class. Specialising in close quarters combat, the warrior is lethal with a selecton of weapons.\n*Type `1` to select this class.*', inline=False)
embed.add_field(name='Rogue', value='Specialising in ranged combat and steath, the Rogue is Death from a Distance, with a touch of magic to make life easier\n*Type `2` to select this class.*', inline=False)
embed.add_field(
name='Sorcerer', value="Nothing above their power, the arcane arts allow the Sorcerers to bend any element to their will\n*Type `3` to select this class.*", inline=False)
embed.set_thumbnail(
url='http://unrealitymag.com/wp-content/uploads/2011/11/torchlight1.jpg')
await self.bot.say(' ', embed=embed)
raceChoice = await self.bot.wait_for_message(author=author)
if '1' in raceChoice.content:
await self.bot.say("Awesome! Now, What is this Warrior's name?")
race = "Warrior"
elif '2' in raceChoice.content:
await self.bot.say("Awesome! Now, What is this Rogue's name?")
race = "Rogue"
elif '3' in raceChoice.content:
await self.bot.say("Awesome! Now, What is this Sorcerers's name?")
race = "Sorcerer"
charname = await self.bot.wait_for_message(author=author)
charname = charname.content
await self.bot.say("Please provide a short backstory about yourself, {}".format(charname))
bio = await self.bot.wait_for_message(author=author)
await self.bot.say("Great, welcome to {}, {}".format(town_record['Town_Name'], charname))
# TODO add session counter for tut purposes.
newplayer['User'] = author.name
newplayer['HomeTownID'] = hometownid
newplayer['CharName'] = charname
newplayer['Race'] = race
newplayer['Level'] = 1
newplayer['Gold'] = 100
newplayer['Location'] = town_record['Location']
newplayer['Bio'] = bio.content
newplayer['Last_Played'] = 'Never'
newplayer['Sessions'] = 0
if 'W' in race:
newplayer['BaseStats'] = {'HP': 50, 'Mana': 10, 'Stamina': 30}
newplayer[
'Avatar'] = "https://s-media-cache-ak0.pinimg.com/736x/77/02/6b/77026b08f33fb0b4a35434553c4fccc8.jpg"
elif 'R' in race:
newplayer['BaseStats'] = {'HP': 40, 'Mana': 15, 'Stamina': 20}
newplayer[
'Avatar'] = "https://s-media-cache-ak0.pinimg.com/736x/8c/2b/da/8c2bdafd9c5c5b2ec38b81741aa5e879.jpg"
elif 'S' in race:
newplayer['BaseStats'] = {'HP': 35, 'Mana': 30, 'Stamina': 15}
newplayer[
'Avatar'] = "https://s-media-cache-ak0.pinimg.com/originals/c3/e5/25/c3e525a719eaa6ae0df486baa672391c.jpg"
else:
await self.bot.say("Sorry... there seems to be an issue with your class. Please try again.")
return
if await self.check_player(author.id):
self.playerRoster[author.id] | |
[]
six_dof_list[bi]['trans_pred_world'] = trans_pred_world[start:end]
inds_list.append(inds_i)
start = end
bboxes_merge_concat = []
segms_merge_concat = []
car_cls_score_pred_concat = []
quaternion_pred_concat = []
trans_pred_world_concat = []
for ids, bboxes, segms, six_dof in zip(inds_list, bboxes_list, segms_list, six_dof_list):
bboxes_merge_concat.append(bboxes[car_cls_coco][ids])
segms_merge_concat.append(np.array(segms[car_cls_coco])[ids])
car_cls_score_pred_concat.append(six_dof['car_cls_score_pred'][ids])
quaternion_pred_concat.append(six_dof['quaternion_pred'][ids])
trans_pred_world_concat.append(six_dof['trans_pred_world'][ids])
bboxes_merge[car_cls_coco] = np.concatenate(bboxes_merge_concat, axis=0)
segms_merge[car_cls_coco] = np.concatenate(segms_merge_concat, axis=0)
six_dof_merge['car_cls_score_pred'] = np.concatenate(car_cls_score_pred_concat, axis=0)
six_dof_merge['quaternion_pred'] = np.concatenate(quaternion_pred_concat, axis=0)
six_dof_merge['trans_pred_world'] = np.concatenate(trans_pred_world_concat, axis=0)
output_model_merge = (bboxes_merge, segms_merge, six_dof_merge)
if draw_flag:
car_cls_score_pred = six_dof_merge['car_cls_score_pred']
quaternion_pred = six_dof_merge['quaternion_pred']
trans_pred_world = six_dof_merge['trans_pred_world'].copy()
euler_angle = np.array([quaternion_to_euler_angle(x) for x in quaternion_pred])
car_labels = np.argmax(car_cls_score_pred, axis=1)
kaggle_car_labels = [self.unique_car_mode[x] for x in car_labels]
car_names = np.array([car_id2name[x].name for x in kaggle_car_labels])
# img_box_mesh_refined = self.visualise_box_mesh(image,bboxes[car_cls_coco], segms[car_cls_coco],car_names, euler_angle,trans_pred_world_refined)
img_box_mesh_refined, iou_flag = self.visualise_box_mesh(image, bboxes_merge[car_cls_coco],
segms_merge[car_cls_coco], car_names,
euler_angle, trans_pred_world)
imwrite(img_box_mesh_refined,
os.path.join(args.out[:-4] + '_mes_box_vis_merged/' + img_name.split('/')[-1])[
:-4] + '_merged.jpg')
tmp_file = os.path.join(tmp_dir, "{}.pkl".format(last_name[:-4]))
mmcv.dump(output_model_merge, tmp_file)
return output_model_merge
def visualise_pred_merge_postprocessing(self, outputs, args, conf_thred=0.8):
    """Merge two models' per-image predictions with IOU-based NMS and visualise.

    `outputs` is a pair of parallel per-image result lists; each entry is
    ``(bboxes, segms, six_dof)``.  For every image the car-class detections
    of both models are scored with a mesh-projection IOU, concatenated,
    NMS-filtered, split back per model, and re-assembled into one merged
    prediction which is drawn and written next to ``args.out``.

    :param outputs: two aligned lists of per-image model outputs.
    :param args: namespace providing `out` (output path prefix).
    :param conf_thred: unused here; kept for interface compatibility.
    :return: list of merged (bboxes, segms, six_dof) tuples, one per image.
    :raises FileNotFoundError: when a referenced test image is missing.
    """
    car_cls_coco = 2  # index of the car class in the detector's outputs
    test_folder = '/data/home/yyj/code/kaggle/new_code/Kaggle_PKU_Baidu/data/pku_data/test_images/'
    ## first we have to guarantee the outputs image names keep sequence consistence
    output_model_merge = []
    for idx, (a, b) in enumerate(zip(outputs[0], outputs[1])):
        print(idx)
        img_name_a = os.path.basename(a[2]['file_name'])
        img_name_b = os.path.basename(b[2]['file_name'])
        assert img_name_a == img_name_b
        img_name = os.path.join(test_folder, img_name_a)
        if not os.path.isfile(img_name):
            # Bug fix: this was `assert "Image file does not exist!"` — a
            # truthy-string no-op that left `image` unbound and crashed
            # later with NameError.  Fail loudly and clearly instead.
            raise FileNotFoundError("Image file does not exist: %s" % img_name)
        image = imread(img_name)
        bboxes_a, segms_a, six_dof_a = a[0], a[1], a[2]
        bboxes_b, segms_b, six_dof_b = b[0], b[1], b[2]
        # Seed the merged structures from model A; the car-class slots are
        # overwritten below with the NMS-merged detections.
        bboxes_merge = bboxes_a.copy()
        segms_merge = segms_a.copy()
        six_dof_merge = six_dof_a.copy()
        # Attach a mesh-projection IOU score to every car box of each model.
        bboxes_a_with_IOU = get_IOU(image, bboxes_a[car_cls_coco], segms_a[car_cls_coco], six_dof_a,
                                    car_id2name, self.car_model_dict, self.unique_car_mode, self.camera_matrix)
        bboxes_b_with_IOU = get_IOU(image, bboxes_b[car_cls_coco], segms_b[car_cls_coco], six_dof_b,
                                    car_id2name, self.car_model_dict, self.unique_car_mode, self.camera_matrix)
        bboxes_with_IOU = np.concatenate([bboxes_a_with_IOU, bboxes_b_with_IOU], axis=0)
        inds = nms_with_IOU(bboxes_with_IOU)  ## IOU nms filter out processing return output indices
        inds = np.array(inds)
        # Split the surviving indices back into per-model index sets.
        inds_a = inds[np.where(inds < bboxes_a_with_IOU.shape[0])]
        inds_b = inds[np.where(inds >= bboxes_a_with_IOU.shape[0])] - bboxes_a_with_IOU.shape[0]
        bboxes_merge[car_cls_coco] = np.concatenate(
            [bboxes_a[car_cls_coco][inds_a], bboxes_b[car_cls_coco][inds_b]], axis=0)
        segms_merge[car_cls_coco] = np.concatenate(
            [np.array(segms_a[car_cls_coco])[inds_a], np.array(segms_b[car_cls_coco])[inds_b]], axis=0)
        six_dof_merge['car_cls_score_pred'] = np.concatenate(
            [six_dof_a['car_cls_score_pred'][inds_a], six_dof_b['car_cls_score_pred'][inds_b]], axis=0)
        six_dof_merge['quaternion_pred'] = np.concatenate(
            [six_dof_a['quaternion_pred'][inds_a], six_dof_b['quaternion_pred'][inds_b]], axis=0)
        six_dof_merge['trans_pred_world'] = np.concatenate(
            [six_dof_a['trans_pred_world'][inds_a], six_dof_b['trans_pred_world'][inds_b]], axis=0)
        output_model_merge.append((bboxes_merge, segms_merge, six_dof_merge))
        # Visualisation: decode car identity / pose and render mesh + boxes.
        car_cls_score_pred = six_dof_merge['car_cls_score_pred']
        quaternion_pred = six_dof_merge['quaternion_pred']
        trans_pred_world = six_dof_merge['trans_pred_world'].copy()
        euler_angle = np.array([quaternion_to_euler_angle(x) for x in quaternion_pred])
        car_labels = np.argmax(car_cls_score_pred, axis=1)
        kaggle_car_labels = [self.unique_car_mode[x] for x in car_labels]
        car_names = np.array([car_id2name[x].name for x in kaggle_car_labels])
        # img_box_mesh_refined = self.visualise_box_mesh(image,bboxes[car_cls_coco], segms[car_cls_coco],car_names, euler_angle,trans_pred_world_refined)
        img_box_mesh_refined, iou_flag = self.visualise_box_mesh(image, bboxes_merge[car_cls_coco],
                                                                 segms_merge[car_cls_coco], car_names,
                                                                 euler_angle, trans_pred_world)
        imwrite(img_box_mesh_refined,
                os.path.join(args.out[:-4] + '_mes_box_vis_merged/' + img_name.split('/')[-1])[
                    :-4] + '_merged.jpg')
    return output_model_merge
def visualise_box_mesh(self, image, bboxes, segms, car_names, euler_angle, trans_pred_world):
    """Render car meshes and boxes onto `image`; returns (combined image, iou_flag)."""
    return draw_box_mesh_kaggle_pku(
        image,
        bboxes,
        segms,
        car_names,
        self.car_model_dict,
        self.camera_matrix,
        trans_pred_world,
        euler_angle,
    )
def visualise_mesh(self, image, bboxes, segms, car_names, euler_angle, trans_pred_world):
    """Render predicted car meshes onto `image` and return the combined image."""
    combined = draw_result_kaggle_pku(image,
                                      bboxes,
                                      segms,
                                      car_names,
                                      self.car_model_dict,
                                      self.camera_matrix,
                                      trans_pred_world,
                                      euler_angle)
    return combined
def visualise_kaggle(self, img, coords):
    """Project a wireframe cuboid (and centre dot) for each predicted car pose.

    :param img: BGR image to draw on (a copy is made).
    :param coords: iterable of 6-DoF predictions ordered
        (yaw, pitch, roll, x, y, z) — the world translation plus Euler
        rotation, as used by the Kaggle submission format.
    :return: annotated copy of `img`.
    """
    # You will also need functions from the previous cells
    # Half-extents of the drawn cuboid; presumably an "average car" in
    # metres — TODO confirm against the dataset's car models.
    x_l = 1.02
    y_l = 0.80
    z_l = 2.31
    img = img.copy()
    for point in coords:
        # Get values
        x, y, z = point[3], point[4], point[5]
        # yaw, pitch, roll = -pitch, -yaw, -roll
        yaw, pitch, roll = point[0], point[1], point[2]
        # Axis-convention swap between the dataset's Euler order and
        # euler_to_Rot's expectation.
        yaw, pitch, roll = -pitch, -yaw, -roll
        # Math
        # Build the 3x4 camera-from-world transform [R|t].
        Rt = np.eye(4)
        t = np.array([x, y, z])
        Rt[:3, 3] = t
        Rt[:3, :3] = euler_to_Rot(yaw, pitch, roll).T
        Rt = Rt[:3, :]
        # Cuboid corners (homogeneous); the last row is the car centre.
        P = np.array([[x_l, -y_l, -z_l, 1],
                      [x_l, -y_l, z_l, 1],
                      [-x_l, -y_l, z_l, 1],
                      [-x_l, -y_l, -z_l, 1],
                      [0, 0, 0, 1]]).T
        # Pinhole projection followed by perspective divide.
        img_cor_points = np.dot(self.camera_matrix, np.dot(Rt, P))
        img_cor_points = img_cor_points.T
        img_cor_points[:, 0] /= img_cor_points[:, 2]
        img_cor_points[:, 1] /= img_cor_points[:, 2]
        img_cor_points = img_cor_points.astype(int)
        # Drawing
        img = draw_line(img, img_cor_points)
        img = draw_points(img, img_cor_points[-1:])
    return img
def clean_corrupted_images(self, annotations):
    """Drop annotations whose image is one of the 5 known-corrupted training images.

    The comparison is on the filename stem (basename without extension).
    """
    # For training images, there are 5 corrupted images:
    corrupted_images = {'ID_1a5a10365', 'ID_4d238ae90', 'ID_408f58e9f', 'ID_bb1d991f6', 'ID_c44983aeb'}
    annotations_clean = []
    for ann in annotations:
        stem = ann['filename'].split('/')[-1][:-4]
        if stem not in corrupted_images:
            annotations_clean.append(ann)
    return annotations_clean
def clean_outliers(self, annotations):
    """
    Remove per-car annotations whose world translation is outside the
    plausible range for this dataset:

        -80 <= x <= 80,   1 <= y <= 50,   3 <= z <= 150

    Cars failing the check are dropped (counted as corrupted); each image
    keeps its remaining cars with array-typed fields.

    :param annotations: list of per-image annotation dicts.
    :return: list of cleaned annotation dicts.
    """
    corrupted_count = 0
    clean_count = 0
    annotations_clean = []
    for ann in annotations:
        # Indices of the cars to keep for this image.
        keep = []
        for box_idx, translation in enumerate(ann['translations']):
            within = (-80 <= translation[0] <= 80
                      and 1 <= translation[1] <= 50
                      and 3 <= translation[2] <= 150)
            if within:
                keep.append(box_idx)
            else:
                corrupted_count += 1
        bboxes = np.array([ann['bboxes'][i] for i in keep], dtype=np.float32)
        labels = np.array([ann['labels'][i] for i in keep], dtype=np.int64)
        eular_angles = np.array([ann['eular_angles'][i] for i in keep], dtype=np.float32)
        quaternion_semispheres = np.array([ann['quaternion_semispheres'][i] for i in keep], dtype=np.float32)
        translations = np.array([ann['translations'][i] for i in keep], dtype=np.float32)
        rles = [ann['rles'][i] for i in keep]
        assert len(bboxes) == len(labels) == len(eular_angles) == len(quaternion_semispheres) == len(translations)
        clean_count += len(bboxes)
        annotations_clean.append({
            'filename': ann['filename'],
            'width': ann['width'],
            'height': ann['height'],
            'bboxes': bboxes,
            'labels': labels,
            'eular_angles': eular_angles,
            'quaternion_semispheres': quaternion_semispheres,
            'translations': translations,
            'rles': rles,
        })
    print("Totaly corrupted count is: %d, clean count: %d" % (corrupted_count, clean_count))
    return annotations_clean
def group_rectangles(self, annotations,
                     outfile='/data/Kaggle/bboxes_with_translation_pick.pkl',
                     draw_flag=True):
    """
    Generate the reference bboxes (with attached translations) used for
    translation regression. Only needs to be run once.

    :param annotations: list of per-image annotation dicts.
    :param outfile: pickle path the selected boxes are dumped to.
    :param draw_flag: when True, render all boxes and the selected boxes
        onto copies of the first annotated image for inspection.
    :return: None (results are written to disk).
    """
    # Stack every (bbox, translation) row from all images into one array.
    per_image = [np.concatenate((ann['bboxes'], ann['translations']), axis=1)
                 for ann in annotations]
    bboxes_with_translation = np.vstack(per_image)
    print('Total number of cars: %d.' % bboxes_with_translation.shape[0])
    # We read an image first
    picked = non_max_suppression_fast(bboxes_with_translation, overlapThresh=0.99)
    # Some boxes are outside the boundary, we need to get rid of them:
    # NOTE(review): the `>= 1480` lower bound on y matches the observed
    # y-coordinate floor of this dataset (see stats elsewhere) — confirm.
    idx_valid = (np.array(picked[:, 0] <= self.image_shape[1]) &
                 np.array(picked[:, 1] <= self.image_shape[0]) &
                 np.array(picked[:, 0] >= 0) &
                 np.array(picked[:, 1] >= 1480))
    picked = picked[idx_valid]
    print('Final number of selected boxed: %d.' % picked.shape[0])
    mmcv.dump(picked, outfile)
    if draw_flag:
        img = imread(annotations[0]['filename'])
        img_2 = img.copy()
        # Draw every raw box on one copy, the NMS-selected boxes on another.
        for bb in bboxes_with_translation:
            img = cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), color=(0, 255, 0), thickness=1)
        imwrite(img, '/data/Kaggle/wudi_data/rect_all.jpg')
        for bb in picked:
            img_2 = cv2.rectangle(img_2, (bb[0], bb[1]), (bb[2], bb[3]), color=(0, 255, 0), thickness=1)
        imwrite(img_2, '/data/Kaggle/wudi_data/rect_selected.jpg')
def print_statistics_annotations(self, annotations):
    """
    Print summary statistics computed from the annotations: cars per image,
    projected image coordinates, world coordinates and car-model labels.

    :param annotations: list of per-image annotation dicts.
    :return: None (statistics are printed to stdout).
    """
    car_per_image = []
    xp, yp = [], []
    xw, yw, zw = [], [], []
    car_models = []
    for ann in annotations:
        car_per_image.append(len(ann['bboxes']))
        for box_idx in range(len(ann['bboxes'])):
            car_models.append(ann['labels'][box_idx])
            proj_x, proj_y, world_x, world_y, world_z = self._get_img_coords(
                translation=ann['translations'][box_idx])
            xp.append(proj_x)
            yp.append(proj_y)
            xw.append(world_x)
            yw.append(world_y)
            zw.append(world_z)
    car_per_image = np.array(car_per_image)
    print('Total images: %d, car num sum: %d, minmin: %d, max: %d, mean: %d' %
          (len(annotations), car_per_image.sum(), car_per_image.min(), car_per_image.max(), car_per_image.mean()))
    # Observed: Total images: 6691, car num sum: 74029, minmin: 1, max: 43, mean: 11
    xp, yp = np.array(xp), np.array(yp)
    print("x min: %d, max: %d, mean: %d" % (int(min(xp)), int(max(xp)), int(xp.mean())))
    print("y min: %d, max: %d, mean: %d" % (int(min(yp)), int(max(yp)), int(yp.mean())))
    # Observed: x min: -851, max: 4116, mean: 1551
    # Observed: y min: 1482, max: 3427, mean: 1820
    xw, yw, zw = np.array(xw), np.array(yw), np.array(zw)
    print("x min: %d, max: %d, mean: %d, std: %.3f" % (int(min(xw)), int(max(xw)), int(xw.mean()), xw.std()))
    print("y min: %d, max: %d, mean: %d, std: %.3f" % (int(min(yw)), int(max(yw)), int(yw.mean()), yw.std()))
    print("z min: %d, max: %d, mean: %d, std: %.3f" % (int(min(zw)), int(max(zw)), int(zw.mean()), zw.std()))
    # Observed: x min: -90, max: 519, mean: -3, std: 14.560
    # Observed: y min: 1, max: 689, mean: 9, std: 6.826
    # Observed: z min: 3, max: 3502, mean: 52, std: 40.046
    car_models = np.array(car_models)
    print("Car model: max: %d, min: %d, total: %d" % (car_models.max(), car_models.min(), len(car_models)))
    # Observed: Car model: max: 76, min: 2, total: 49684
    print('Unique car models:')
    print(np.unique(car_models))
    # Observed: array([2, 6, 7, 8, 9, 12, 14, 16, 18, 19, 20, 23, 25, 27, 28, 31, 32,
    #                  35, 37, 40, 43, 46, 47, 48, 50, 51, 54, 56, 60, 61, 66, 70, 71, 76])
    print("Number of unique car models: %d" % len(np.unique(car_models)))
    # Observed: 34
def print_statistics(self, train):
car_per_image = np.array([len(self._str2coords(s)) for s in train['PredictionString']])
print('Total images: %d, car num sum: %d, minmin: %d, max: %d, mean: %d' %
(len(car_per_image), car_per_image.sum(), car_per_image.min(), car_per_image.max(), car_per_image.mean()))
"""
Total images: 4262, car num sum: 49684, minmin: 1, max: 44, mean: 11
"""
xs, ys = [], []
for ps in train['PredictionString']:
x, y = self._get_img_coords(ps)
xs += list(x)
ys += list(y)
xs, ys = np.array(xs), np.array(ys)
print("x min: %d, | |
# Source repository: askender/deep_disfluency
# -*- coding: utf-8 -*-
from collections import defaultdict
import re
from nltk import tree
from swda import CorpusReader
from tree_pos_map import TreeMapCorpus
from tree_pos_map import POSMapCorpus
# Pairs of (transcribed word, plausible variant spelling): homophone
# confusions (its/it's, whose/who's, ...) and UK/US spelling variants
# (programme/program, centre/center, ...). Every mapping is listed in both
# directions, and capitalised forms are listed alongside lowercase ones.
possibleMistranscription = [("its", "it's"),
                            ("Its", "It's"),
                            ("it's", "its"),
                            ("It's", "Its"),
                            ("whose", "who's"),
                            ("Whose", "Who's"),
                            ("who's", "whose"),
                            ("Who's", "Whose"),
                            ("you're", "your"),
                            ("You're", "Your"),
                            ("your", "you're"),
                            ("Your", "You're"),
                            ("their", "they're"),
                            ("Their", "They're"),
                            ("they're", "their"),
                            ("They're", "Their"),
                            ("programme", "program"),
                            ("program", "programme"),
                            ("centre", "center"),
                            ("center", "centre"),
                            ("travelling", "traveling"),
                            ("traveling", "travelling"),
                            ("colouring", "coloring"),
                            ("coloring", "colouring")]
class TreeMapWriter:
"""Object which writes mappings from the words in utterances
to the nodes of the corresponding trees in a treebank
"""
def __init__(self, corpus_path="../swda",
             metadata_path="swda-metadata.csv",
             target_folder_path="Maps",
             ranges=None,
             errorLog=None):
    """Build word-to-tree-node maps for the corpus and write them to disk.

    Construction immediately delegates to write_to_file, so creating a
    TreeMapWriter performs all the work as a side effect.

    :param corpus_path: path of the Switchboard (swda) corpus directory.
    :param metadata_path: path of the corpus metadata CSV.
    :param target_folder_path: folder the per-folder map CSV files go into.
    :param ranges: optional collection of conversation numbers to restrict
        processing to (None processes everything).
    :param errorLog: optional path of a file to write error messages to.
    """
    print "started TreeMapWriting"
    self.write_to_file(corpus_path,
                       metadata_path,
                       target_folder_path,
                       ranges,
                       errorLog)
def write_to_file(self, corpus_path,
metadata_path,
target_folder_path,
ranges,
errorLog):
"""Writes files to a target folder with the mappings
from words in utterances to tree nodes in trees.
"""
if errorLog:
errorLog = open(errorLog, 'w')
corpus = CorpusReader(corpus_path, metadata_path)
# Iterate through all transcripts
incorrectTrees = 0
folder = None
corpus_file = None
for trans in corpus.iter_transcripts():
# print "iterating",trans.conversation_no
if not trans.has_pos():
continue
# print "has pos"
if ranges and not trans.conversation_no in ranges:
continue
# print "in range"
# just look at transcripts WITH trees as compliment to the
# below models
if not trans.has_trees():
continue
end = trans.swda_filename.rfind("/")
start = trans.swda_filename.rfind("/", 0, end)
c_folder = trans.swda_filename[start + 1:end]
if c_folder != folder:
# for now splitting the maps by folder
folder = c_folder
if corpus_file:
corpus_file.close()
corpus_file = open(target_folder_path +
"/Tree_map_{0}.csv.text".format(folder), 'w')
wordTreeMapList = TreeMapCorpus(False, errorLog)
print "new map for folder", folder
translist = trans.utterances
translength = len(translist)
count = 0
# iterating through transcript utterance by utterance
# create list of tuples i.e. map from word to the index(ices)
# (possibly multiple or null) of the relevant leaf/ves
# of a given tree i.e. utt.tree[0].leaves[0] would be a pair (0,0))
while count < translength:
utt = trans.utterances[count]
words = utt.text_words()
wordTreeMap = [] # [((word), (List of LeafIndices))]
forwardtrack = 0
backtrack = 0
continued = False
# print "\n COUNT" + str(count)
# print utt.damsl_act_tag()
if len(utt.trees) == 0 or utt.damsl_act_tag() == "x":
wordTreeMap.append((utt, [])) # just dummy value
# errormessage = "WARNING: NO TREE for file/utt: " +\
# str(utt.swda_filename) + " " + utt.caller + "." + \
# str(utt.utterance_index) + "." + \
#str(utt.subutterance_index) + " " + utt.text
# print(errormessage)
count += 1
continue
# raw_input()
# indices for which tree and leaf we're at:
i = 0 # tree
j = 0 # leaf
# initialise pairs of trees and ptb pairs
trees = []
for l in range(0, len(utt.trees)):
trees.append(
(utt.ptb_treenumbers[l], count, l, utt.trees[l]))
# print "TREES = "
# for tree in trees:
# print tree
origtrees = list(trees)
origcount = count
# overcoming the problem of previous utterances contributing
# to the tree at this utterance, we need to add the words from
# the previous utt add in all the words from previous utterance
# with a dialogue act tag/or the same tree?
# check that the last tree in the previous utterance
# is the same as the previous one
previousUttSame = trans.previous_utt_same_speaker(utt)
# print previousUttSame
lastTreeMap = None
if previousUttSame:
# print "search for previous full act utt
# for " + str(utt.swda_filename) +
# str(utt.transcript_index)
lastTreeMap = wordTreeMapList.get_treemap(
trans,
previousUttSame)
if ((not lastTreeMap) or (len(lastTreeMap) == 0) or
(len(lastTreeMap) == 1 and lastTreeMap[0][1] == [])):
# print "no last tree map, backwards searching"
while previousUttSame and \
((not lastTreeMap) or (len(lastTreeMap) == 0) or
(len(lastTreeMap) == 1 and lastTreeMap[0][1] == [])):
previousUttSame = trans.previous_utt_same_speaker(
previousUttSame) # go back one more
lastTreeMap = wordTreeMapList.get_treemap(trans,
previousUttSame)
if previousUttSame:
pass
# print previousUttSame.transcript_index
if not lastTreeMap:
pass
# print "no last treemap found for:"
# print utt.swda_filename
# print utt.transcript_index
if lastTreeMap and \
(utt.damsl_act_tag() == "+" or
(len(lastTreeMap.treebank_numbers) > 0
and lastTreeMap.treebank_numbers[-1] ==
utt.ptb_treenumbers[0])):
continued = True
# might have to backtrack
# now checking for wrong trees
lastPTB = lastTreeMap.treebank_numbers
lastIndexes = lastTreeMap.transcript_numbers
lastTreesTemp = lastTreeMap.get_trees(trans)
lastTrees = []
for i in range(0, len(lastPTB)):
lastTrees.append([lastPTB[i], lastIndexes[i][0],
lastIndexes[i][1], lastTreesTemp[i]])
if not (lastPTB[-1] == utt.ptb_treenumbers[0]):
# print "not same, need to correct!"
# print words
# print trees
# print "last one"
# print previousUttSame.text_words()
# print lastTrees
if utt.ptb_treenumbers[0] - lastPTB[-1] > 1:
# backtrack and redo the antecedent
count = count - (count - lastIndexes[-1][0])
utt = previousUttSame
words = utt.text_words()
mytrees = []
for i in range(0, len(lastTrees) - 1):
mytrees.append(lastTrees[i])
trees = mytrees + [origtrees[0]]
# print "\n(1)backtrack to with new trees:"
backtrack = 1
# print utt.transcript_index
# print words
# print trees
# raw_input()
# alternately, this utt's tree may be further back
# than its antecdent's, rare mistake
elif utt.ptb_treenumbers[0] < lastTrees[-1][0]:
# continue with this utterance and trees
# (if there are any), but replace its first
# tree with its antecdents last one
forwardtrack = 1
trees = [lastTrees[-1]] + origtrees[1:]
# print "\n(2)replacing first one to lasttreemap's:"
# print words
# print trees
# raw_input()
if backtrack != 1: # we should have no match
found_treemap = False
# resetting
# for t in wordTreeMapList.keys():
# print t
# print wordTreeMapList[t]
for t in range(len(lastTreeMap) - 1, -1, -1):
# print lastTreeMap[t][1]
# if there is a leafIndices for the
# word being looked at, gets last mapped one
if len(lastTreeMap[t][1]) > 0:
# print "last treemapping of last
# caller utterance =
# " + str(lastTreeMap[t][1][-1])
j = lastTreeMap[t][1][-1][1] + 1
found_treemap = True
# print "found last mapping, j -1 = " + str(j-1)
# raw_input()
break
if not found_treemap:
pass
# print "NO matched last TREEMAP found for \
# previous Utt Same Speaker of " + \
# str(trans.swda_filename) + " " + \
# str(utt.transcript_index)
# print lastTreeMap
# for tmap in wordTreeMapList.keys():
# print tmap
# print wordTreeMapList[tmap]
# raw_input()
possibleComment = False # can have comments, flag
mistranscribe = False
LeafIndices = [] # possibly empty list of leaf indices
word = words[0]
# loop until no more words left to be matched in utterance
while len(words) > 0:
# print "top WORD:" + word
if not mistranscribe:
wordtest = re.sub(r"[\.\,\?\"\!]", "", word)
wordtest = wordtest.replace("(", "").replace(")", "")
match = False
LeafIndices = [] # possibly empty list of leaf indices
if (possibleComment
or word[0:1] in ["{", "}", "-"]
or word in ["/", ".", ",", "]"]
or wordtest == ""
or any([x in word for x in ["<", ">", "*", "[", "+", "]]",
"...", "#", "="]])):
# no tree equivalent for {D } type annotations
if (word[0:1] == "-" or
any([x in word for x in
["*", "<<", "<+", "[[", "<"]])) \
and not possibleComment:
possibleComment = True
if possibleComment:
#print("match COMMENT!:" + word)
# raw_input()
LeafIndices = []
match = True
#wordTreeMap.append((word, LeafIndices))
if any([x in word for x in [">>", "]]", ">"]]) or \
word[0] == "-": # turn off comment
possibleComment = False
#del words[0]
# LeadIndices will be null here
wordTreeMap.append((word, LeafIndices))
LeafIndices = []
match = True
# print "match annotation!:" + word
del words[0] # word is consumed, should always be one
if len(words) > 0:
word = words[0]
wordtest = re.sub(r"[\.\,\?\/\)\(\"\!]", "", word)
wordtest = wordtest.replace("(", "")
wordtest = wordtest.replace(")", "")
else:
break
continue
# carry on to next word without updating indices?
else:
while i < len(trees):
# print "i number of trees :" + str(len(utt.trees))
# print "i tree number :" + str(i)
# print "i loop word :" + word
tree = trees[i][3]
# print "looking at ptb number " + str(trees[i][0])
# print "looking at index number " \
#+ str(trees[i][1])+","+str(trees[i][2])
while j < len(tree.leaves()):
leaf = tree.leaves()[j]
# print "j number of leaves : " \
#+ str(len(tree.leaves()))
# print "j loop word : | |
more files are missing! Abort!"
)
split_files.append((ref_name, o, gff_file, group_file, mega_file))
use_fq = False
if ref_fq is not None and addon_fq is not None:
use_fq = True
ref_fq_dict = {
r.id.split("|")[0]: r for r in SeqIO.parse(open(ref_fq), "fastq")
}
addon_fq_dict = {
r.id.split("|")[0]: r for r in SeqIO.parse(open(addon_fq), "fastq")
}
mega_info = {} # ref id -> list of matching query_id, or empty list
split_unmatched = set()
for (ref_name, split_name, gff_file, group_file, mega_file) in split_files:
for r in DictReader(open(mega_file), delimiter="\t"):
if r[ref_name] != "NA":
if r[ref_name] not in mega_info:
mega_info[r[ref_name]] = []
if r[split_name] != "NA":
mega_info[r[ref_name]].append(r[split_name])
else: # ref is NA, non-ref is not NA
split_unmatched.add(r[split_name])
# make a rec list of matches of (ref_id, addon_id, representative record, combined group info) where rec_ref or ref_addon could be None, but not both
rec_list = []
d_ref = {r.seqid: r for r in GFF.collapseGFFReader(ref_gff)}
d_addon = {r.seqid: r for r in GFF.collapseGFFReader(addon_gff)}
ref_group_info = sp.MegaPBTree.read_group(ref_group, None)
addon_group_info = sp.MegaPBTree.read_group(addon_group, None)
for ref_id, matches in mega_info.items():
if len(matches) == 0:
rec_list.append(
sp.MatchRecord(
ref_id=ref_id,
addon_id="NA",
rec=d_ref[ref_id],
members=ref_group_info[ref_id],
seqrec=ref_fq_dict[ref_id] if use_fq else None,
)
)
else:
for addon_id in matches:
r1 = d_ref[ref_id]
r2 = d_addon[addon_id]
if (r1.end - r1.start) > (r2.end - r2.start):
rec_list.append(
sp.MatchRecord(
ref_id=ref_id,
addon_id=addon_id,
rec=r1,
members=ref_group_info[ref_id] + addon_group_info[addon_id],
seqrec=ref_fq_dict[ref_id] if use_fq else None,
)
)
else:
rec_list.append(
sp.MatchRecord(
ref_id=ref_id,
addon_id=addon_id,
rec=r2,
members=ref_group_info[ref_id] + addon_group_info[addon_id],
seqrec=addon_fq_dict[addon_id] if use_fq else None,
)
)
for addon_id in split_unmatched:
rec_list.append(
sp.MatchRecord(
ref_id="NA",
addon_id=addon_id,
rec=d_addon[addon_id],
members=addon_group_info[addon_id],
seqrec=addon_fq_dict[addon_id] if use_fq else None,
)
)
sp.write_reclist_to_gff_n_info(rec_list, final_prefix, ref_name, addon_name, use_fq)
for (ref_name, split_name, gff_file, group_file, mega_file) in split_files:
gff_file.unlink()
group_file.unlink()
mega_file.unlink()
def chain_samples(
    dirs,
    names,
    group_filename,
    gff_filename,
    count_filename,
    field_to_use="count_fl",
    fuzzy_junction=0,
    allow_5merge=False,
    max_3_diff=100,
    fastq_filename=None,
):
    """Sequentially chain (merge) isoform samples into one combined set.

    Each sample is folded into a running sp.MegaPBTree; every step writes
    intermediate ``tmp_<name>.*`` files. Afterwards the per-step
    ``mega_info.txt`` tables are walked backwards to emit the final
    ``all_samples.chained_ids.txt`` / ``all_samples.chained_count.txt`` /
    ``all_samples.chained.gff`` (and ``.rep.fq`` when FASTQ is used).

    :param dirs: mapping of sample name -> sample directory.
    :param names: ordered sample names; names starting with "tmp_" are
        previously chained intermediates and are resumed from.
    :param group_filename: per-sample group file name.
    :param gff_filename: per-sample GFF file name.
    :param count_filename: per-sample count file name.
    :param field_to_use: which count column to propagate (e.g. "count_fl").
    :param fuzzy_junction: max junction distance still considered identical.
    :param allow_5merge: whether to collapse shorter 5' transcripts.
    :param max_3_diff: maximum allowed 3' end difference (bp).
    :param fastq_filename: optional per-sample representative FASTQ name.
    :return: None (all results are written to disk / logged).
    """
    # Verify every sample directory has the expected input files up front.
    for d in dirs.values():
        sample_sanity_check(
            Path(d, group_filename),
            Path(d, gff_filename),
            Path(d, count_filename),
            Path(d, fastq_filename) if fastq_filename is not None else None,
        )
    count_info = read_count_info(count_filename, dirs, field_to_use)
    # some names may already start with "tmp_" which means they are intermediate results that have already been chained
    # find the first non "tmp_" and start from there
    if names[0].startswith("tmp_"):
        chain = []
        for start_i, name in enumerate(names):
            if name.startswith("tmp_"):
                chain.append(name[4:])
            else:
                break
        # start_i, name now points at the first "non-tmp" sample
        # we want to go to the last tmp_ sample and read it
        name = names[start_i - 1][4:]  # this is the last tmp_ sample, let's read it
        o = sp.MegaPBTree(
            f"tmp_{name}.gff",
            f"tmp_{name}.group.txt",
            self_prefix=f"tmp_{name}",
            internal_fuzzy_max_dist=fuzzy_junction,
            allow_5merge=allow_5merge,
            max_3_diff=max_3_diff,
            fastq_filename=f"tmp_{name}.rep.fq" if fastq_filename is not None else None,
        )
        # chain.append(name) # no need, already done above
    else:  # everything is new, start fresh
        name = names[0]
        d = Path(dirs[name])
        chain = [name]
        o = sp.MegaPBTree(
            d.joinpath(gff_filename),
            d.joinpath(group_filename),
            self_prefix=name,
            internal_fuzzy_max_dist=fuzzy_junction,
            allow_5merge=allow_5merge,
            max_3_diff=max_3_diff,
            fastq_filename=d.joinpath(fastq_filename)
            if fastq_filename is not None
            else None,
        )
        start_i = 1
    # Fold each remaining sample into the tree; every iteration writes
    # tmp_<name>.* and re-reads it as the new reference tree.
    for name in names[start_i:]:
        if name.startswith("tmp_"):
            raise AssertionError("trying to add a temp file!")
        d = Path(dirs[name])
        o.add_sample(
            d.joinpath(gff_filename),
            d.joinpath(group_filename),
            sample_prefix=name,
            output_prefix=f"tmp_{name}",
            fastq_filename=d.joinpath(fastq_filename)
            if fastq_filename is not None
            else None,
        )
        o = sp.MegaPBTree(
            f"tmp_{name}.gff",
            f"tmp_{name}.group.txt",
            self_prefix=f"tmp_{name}",
            internal_fuzzy_max_dist=fuzzy_junction,
            allow_5merge=allow_5merge,
            max_3_diff=max_3_diff,
            fastq_filename=f"tmp_{name}.rep.fq" if fastq_filename is not None else None,
        )
        chain.append(name)
    # now recursively chain back by looking at mega_info.txt!!!
    d = {}  # ex: (tmp_1009, PB.1.1) --> mega info dict
    for c in chain[1:]:
        for r in DictReader(open(f"tmp_{c}.mega_info.txt"), delimiter="\t"):
            d[f"tmp_{c}", r["superPBID"]] = r
    with open("all_samples.chained_ids.txt", "w") as f1, open(
        "all_samples.chained_count.txt", "w"
    ) as f2:
        writer1 = DictWriter(f1, fieldnames=["superPBID"] + chain, delimiter="\t")
        writer1.writeheader()
        writer2 = DictWriter(f2, fieldnames=["superPBID"] + chain, delimiter="\t")
        writer2.writeheader()
        reader = DictReader(open(f"tmp_{chain[-1]}.mega_info.txt"), delimiter="\t")
        for r in reader:
            saw_NA = False
            r0 = r
            answer = defaultdict(lambda: "NA")  # ex: 1009 --> PB.1.1
            answer2 = defaultdict(lambda: "NA")  # ex: 1009 --> count
            answer[chain[-1]] = r[chain[-1]]
            if r[chain[-1]] != "NA":
                answer2[chain[-1]] = count_info[chain[-1], answer[chain[-1]]]
            # Walk from the newest sample back towards the oldest, following
            # the per-step mega_info links; stop at the first NA (unmatched).
            for c in chain[::-1][
                1:-1
            ]:  # the first sample does not have tmp_, because it's not a chain
                if r[f"tmp_{c}"] == "NA":
                    saw_NA = True
                    break
                else:
                    r2 = d[f"tmp_{c}", r[f"tmp_{c}"]]
                    answer[c] = r2[c]
                    if answer[c] != "NA":
                        answer2[c] = count_info[c, answer[c]]
                    r = r2
            if not saw_NA:
                answer[chain[0]] = r[chain[0]]
                if answer[chain[0]] != "NA":
                    answer2[chain[0]] = count_info[chain[0], answer[chain[0]]]
            rec1 = {"superPBID": r0["superPBID"]}
            rec2 = {"superPBID": r0["superPBID"]}
            for c in chain:
                rec1[c] = answer[c]
                rec2[c] = str(answer2[c])
            writer1.writerow(rec1)
            writer2.writerow(rec2)
    shutil.copyfile(f"tmp_{chain[-1]}.gff", "all_samples.chained.gff")
    if fastq_filename is not None:
        shutil.copyfile(f"tmp_{chain[-1]}.rep.fq", "all_samples.chained.rep.fq")
    logger.info("Chained output written to:")
    logger.info("all_samples.chained.gff")
    logger.info(f1.name)
    logger.info(f2.name)
    if fastq_filename is not None:
        logger.info("all_samples.chained.rep.fq")
def chain_samples_multithread(
    dirs: Path,  # NOTE(review): annotation says Path, but `dirs.values()` / `dirs[name]` below imply a dict of name -> Path — confirm and fix the hint.
    names: List[str],
    group_filename: str,
    gff_filename: str,
    count_filename: str,
    field_to_use: str = "count_fl",
    fuzzy_junction: int = 0,
    allow_5merge: bool = False,
    max_3_diff: int = 100,
    fastq_filename: Optional[str] = None,
    cpus: int = 4,
):
    """Chain (merge) isoform samples, splitting work across `cpus` chunks.

    Same end result as chain_samples, but each add step splits the reference
    GFF into chunks (chain_split_file), chains them in parallel and merges
    the chunk outputs (combine_split_chained_results). Final outputs are
    ``all_samples.chained_ids.txt`` / ``all_samples.chained_count.txt`` /
    ``all_samples.chained.gff`` (and ``.rep.fq`` when FASTQ is used).

    :param dirs: mapping of sample name -> sample directory.
    :param names: ordered sample names; names starting with "tmp_" are
        previously chained intermediates and are resumed from.
    :param group_filename: per-sample group file name.
    :param gff_filename: per-sample GFF file name.
    :param count_filename: per-sample count file name.
    :param field_to_use: which count column to propagate (e.g. "count_fl").
    :param fuzzy_junction: max junction distance still considered identical.
    :param allow_5merge: whether to collapse shorter 5' transcripts.
    :param max_3_diff: maximum allowed 3' end difference (bp).
    :param fastq_filename: optional per-sample representative FASTQ name.
    :param cpus: number of chunks/processes used per chaining step.
    :return: None (all results are written to disk / logged).
    """
    # Verify every sample directory has the expected input files up front.
    for d in dirs.values():
        sample_sanity_check(
            Path(d, group_filename),
            Path(d, gff_filename),
            Path(d, count_filename),
            Path(d, fastq_filename) if fastq_filename is not None else None,
        )
    count_info = read_count_info(count_filename, dirs, field_to_use)
    # some names may already start with "tmp_" which means they are intermediate results that have already been chained
    # find the first non "tmp_" and start from there
    if names[0].startswith("tmp_"):
        chain = []
        for start_i, name in enumerate(names):
            if name.startswith("tmp_"):
                chain.append(name[4:])
            else:
                break
        # start_i, name now points at the first "non-tmp" sample
        # we want to go to the last tmp_ sample and read it
        name = names[start_i - 1][4:]  # this is the last tmp_ sample, let's read it
        first_add = False
    else:  # everything is new, start fresh
        name = names[0]
        chain = [name]
        start_i = 1
        first_add = True
    for addon_name in names[start_i:]:
        if addon_name.startswith("tmp_"):
            raise NotImplementedError(
                "Chaining intermediate with unchained files is currently not implemented"
            )
        ref_name = chain[-1]
        ref_d = dirs[ref_name]
        # On the very first add the reference is the raw sample directory;
        # afterwards it is the tmp_* output of the previous step.
        if first_add:
            ref_gff = ref_d.joinpath(gff_filename)
            ref_group = ref_d.joinpath(group_filename)
            ref_fq = (
                ref_d.joinpath(fastq_filename) if fastq_filename is not None else None
            )
        else:
            ref_name = f"tmp_{ref_name}"
            ref_gff = f"{ref_name}.gff"
            ref_group = f"{ref_name}.group.txt"
            ref_fq = f"{ref_name}.rep.fq" if fastq_filename is not None else None
        addon_d = dirs[addon_name]
        addon_gff = addon_d.joinpath(gff_filename)
        addon_group = addon_d.joinpath(group_filename)
        addon_fq = (
            addon_d.joinpath(fastq_filename) if fastq_filename is not None else None
        )
        # Split the reference into n_chunks pieces and chain them in parallel.
        split_outs, split_ins = chain_split_file(
            ref_gff=ref_gff,
            ref_group=ref_group,
            ref_name=ref_name,
            addon_gff=addon_gff,
            addon_group=addon_group,
            addon_name=addon_name,
            fuzzy_junction=fuzzy_junction,
            allow_5merge=allow_5merge,
            max_3_diff=max_3_diff,
            n_chunks=cpus,
        )
        # Merge the per-chunk results into tmp_<addon_name>.* files.
        combine_split_chained_results(
            output_prefixes=split_outs,
            final_prefix=f"tmp_{addon_name}",
            ref_gff=ref_gff,
            ref_group=ref_group,
            ref_name=ref_name,
            ref_fq=ref_fq,
            addon_gff=addon_gff,
            addon_group=addon_group,
            addon_name=addon_name,
            addon_fq=addon_fq,
        )
        chain.append(addon_name)
        # Remove the temporary split inputs now that they are combined.
        for in_gff_split, in_group_split in split_ins:
            Path(in_gff_split).unlink()  # remove the split gff
            Path(in_group_split).unlink()
        first_add = False
    # now recursively chain back by looking at mega_info.txt!!!
    d = {}  # ex: (tmp_sample1, PB.1.1) --> mega info dict
    for c in chain[1:]:
        for r in DictReader(open(f"tmp_{c}.mega_info.txt"), delimiter="\t"):
            d[f"tmp_{c}", r["superPBID"]] = r
    with open("all_samples.chained_ids.txt", "w") as f1, open(
        "all_samples.chained_count.txt", "w"
    ) as f2:
        writer1 = DictWriter(f1, fieldnames=["superPBID"] + chain, delimiter="\t")
        writer1.writeheader()
        writer2 = DictWriter(f2, fieldnames=["superPBID"] + chain, delimiter="\t")
        writer2.writeheader()
        reader = DictReader(open(f"tmp_{chain[-1]}.mega_info.txt"), delimiter="\t")
        for r in reader:
            saw_NA = False
            r0 = r
            answer = defaultdict(lambda: "NA")  # ex: 1009 --> PB.1.1
            answer2 = defaultdict(lambda: "NA")  # ex: 1009 --> count
            answer[chain[-1]] = r[chain[-1]]
            if r[chain[-1]] != "NA":
                answer2[chain[-1]] = count_info[chain[-1], answer[chain[-1]]]
            # Walk from the newest sample back towards the oldest, following
            # the per-step mega_info links; stop at the first NA (unmatched).
            for c in chain[::-1][
                1:-1
            ]:  # the first sample does not have tmp_, because it's not a chain
                if r[f"tmp_{c}"] == "NA":
                    saw_NA = True
                    break
                else:
                    r2 = d[f"tmp_{c}", r[f"tmp_{c}"]]
                    answer[c] = r2[c]
                    if answer[c] != "NA":
                        answer2[c] = count_info[c, answer[c]]
                    r = r2
            if not saw_NA:
                answer[chain[0]] = r[chain[0]]
                if answer[chain[0]] != "NA":
                    answer2[chain[0]] = count_info[chain[0], answer[chain[0]]]
            rec1 = {"superPBID": r0["superPBID"]}
            rec2 = {"superPBID": r0["superPBID"]}
            for c in chain:
                rec1[c] = answer[c]
                rec2[c] = str(answer2[c])
            writer1.writerow(rec1)
            writer2.writerow(rec2)
    shutil.copyfile(f"tmp_{chain[-1]}.gff", "all_samples.chained.gff")
    if fastq_filename is not None:
        shutil.copyfile(f"tmp_{chain[-1]}.rep.fq", "all_samples.chained.rep.fq")
    logger.info("Chained output written to:")
    logger.info("all_samples.chained.gff")
    logger.info(f1.name)
    logger.info(f2.name)
    if fastq_filename is not None:
        logger.info("all_samples.chained.rep.fq")
@app.command(name="")
def main(
config_file: str = typer.Argument(...),
field_to_use: fl_fields = typer.Option(
fl_fields.count_fl,
show_default=False,
help="Which count field to use for chained sample (default: count_fl)",
),
fuzzy_junction: int = typer.Option(
0,
show_default=False,
help="Max allowed distance in junction to be considered identical (default: 0 bp)",
),
allow_5merge: bool = typer.Option(
True,
"--dun-merge-5-shorter",
show_default=False,
help="Don't collapse shorter 5' transcripts (default: off)",
), # store_false
max_3_diff: int = typer.Option(
30, show_default=False, help="Maximum 3' difference allowed (default: 30bp)"
),
cpus: int = typer.Option(
8,
show_default=False,
help="Number of CPUs to use for | |
which appear to be identical
"""
identical_blocks = []
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b):
identical_blocks.append((block_a, block_b))
return identical_blocks
@property
def differing_blocks(self):
    """
    :returns: A list of matched block pairs that do not appear identical.
    """
    return [
        (block_a, block_b)
        for block_a, block_b in self._block_matches
        if not self.blocks_probably_identical(block_a, block_b)
    ]
@property
def blocks_with_differing_constants(self):
    """
    :return: A dict mapping each matched block pair that is identical except
        for its constants to the list of differing constants.
    """
    diffs = dict()
    for block_a, block_b in self._block_matches:
        # Identical in shape but not once constants are compared.
        if self.blocks_probably_identical(block_a, block_b) and \
                not self.blocks_probably_identical(block_a, block_b, check_constants=True):
            norm_a = NormalizedBlock(block_a, self._function_a)
            norm_b = NormalizedBlock(block_b, self._function_b)
            diffs[(block_a, block_b)] = FunctionDiff._block_diff_constants(norm_a, norm_b)
    return diffs
@property
def block_matches(self):
    # Raw list of matched (block_a, block_b) pairs produced by the diff.
    return self._block_matches
@property
def unmatched_blocks(self):
    # Tuple of (blocks of function A with no match, blocks of function B with no match).
    return self._unmatched_blocks_from_a, self._unmatched_blocks_from_b
@staticmethod
def get_normalized_block(addr, function):
    """
    :param addr: Where to start the normalized block.
    :param function: A function containing the block address.
    :returns: A NormalizedBlock starting at `addr` within `function`.
    """
    return NormalizedBlock(addr, function)
def block_similarity(self, block_a, block_b):
    """
    :param block_a: The first block address.
    :param block_b: The second block address.
    :returns: The similarity of the basic blocks (1.0 = identical), normalized
        for the base address of the block and function call addresses.
    """
    # handle sim procedure blocks: identical hooks are a perfect match
    if self._project_a.is_hooked(block_a) and self._project_b.is_hooked(block_b):
        if self._project_a._sim_procedures[block_a] == self._project_b._sim_procedures[block_b]:
            return 1.0
        else:
            return 0.0
    try:
        block_a = NormalizedBlock(block_a, self._function_a)
    except (SimMemoryError, SimEngineError):
        block_a = None
    try:
        block_b = NormalizedBlock(block_b, self._function_b)
    except (SimMemoryError, SimEngineError):
        block_b = None
    # if both were None then they are assumed to be the same, if only one was None they are assumed to differ
    if block_a is None and block_b is None:
        return 1.0
    elif block_a is None or block_b is None:
        return 0.0
    # get all elements for computing similarity: statement tags, constants,
    # register offsets and the block's jumpkind
    tags_a = [s.tag for s in block_a.statements]
    tags_b = [s.tag for s in block_b.statements]
    consts_a = [c.value for c in block_a.all_constants]
    consts_b = [c.value for c in block_b.all_constants]
    all_registers_a = [s.offset for s in block_a.statements if hasattr(s, "offset")]
    all_registers_b = [s.offset for s in block_b.statements if hasattr(s, "offset")]
    jumpkind_a = block_a.jumpkind
    jumpkind_b = block_b.jumpkind
    # compute total distance as the sum of edit distances over each element kind
    total_dist = 0
    total_dist += _levenshtein_distance(tags_a, tags_b)
    total_dist += _levenshtein_distance(block_a.operations, block_b.operations)
    total_dist += _levenshtein_distance(all_registers_a, all_registers_b)
    # constants whose difference is "acceptable" (e.g. relocated addresses) do not count
    acceptable_differences = self._get_acceptable_constant_differences(block_a, block_b)
    total_dist += _normalized_levenshtein_distance(consts_a, consts_b, acceptable_differences)
    total_dist += 0 if jumpkind_a == jumpkind_b else 1
    # compute similarity as 1 - (distance / number of compared values)
    # NOTE(review): the register-list distance is added to total_dist above,
    # but the register counts are not included in num_values, so the
    # similarity can drop below 0 for register-heavy blocks — confirm intended.
    num_values = max(len(tags_a), len(tags_b))
    num_values += max(len(consts_a), len(consts_b))
    num_values += max(len(block_a.operations), len(block_b.operations))
    num_values += 1  # jumpkind
    similarity = 1 - (float(total_dist) / num_values)
    return similarity
def blocks_probably_identical(self, block_a, block_b, check_constants=False):
    """
    :param block_a: The first block address.
    :param block_b: The second block address.
    :param check_constants: Whether or not to require matching constants in blocks.
    :returns: Whether or not the blocks appear to be identical.
    """
    # handle sim procedure blocks: identical iff the hooks are equal
    if self._project_a.is_hooked(block_a) and self._project_b.is_hooked(block_b):
        return self._project_a._sim_procedures[block_a] == self._project_b._sim_procedures[block_b]
    try:
        block_a = NormalizedBlock(block_a, self._function_a)
    except (SimMemoryError, SimEngineError):
        block_a = None
    try:
        block_b = NormalizedBlock(block_b, self._function_b)
    except (SimMemoryError, SimEngineError):
        block_b = None
    # if both were None then they are assumed to be the same, if only one was None they are assumed to differ
    if block_a is None and block_b is None:
        return True
    elif block_a is None or block_b is None:
        return False
    # if they represent a different number of blocks they are not the same
    if len(block_a.blocks) != len(block_b.blocks):
        return False
    # compute differing constants even when check_constants is False:
    # structurally unmatched statements always count as a difference
    try:
        diff_constants = FunctionDiff._block_diff_constants(block_a, block_b)
    except UnmatchedStatementsException:
        return False
    if not check_constants:
        return True
    # get values of differences that probably indicate no change
    acceptable_differences = self._get_acceptable_constant_differences(block_a, block_b)
    # todo match globals
    for c in diff_constants:
        if (c.value_a, c.value_b) in self._block_matches:
            # constants point to matched basic blocks
            continue
        # BUG FIX: the original tested `(c.value_a and c.value_b) in ...`,
        # which checks membership of the boolean-and result (normally just
        # c.value_b) rather than the (value_a, value_b) pair, mirroring the
        # tuple membership test on _block_matches above.
        if self._bindiff is not None and (c.value_a, c.value_b) in self._bindiff.function_matches:
            # constants point to matched functions
            continue
        # if both are in the binary we'll assume it's okay, although we should really match globals
        # TODO use global matches
        if self._project_a.loader.main_object.contains_addr(c.value_a) and \
                self._project_b.loader.main_object.contains_addr(c.value_b):
            continue
        # if the difference is equal to the difference in block addr's or successor addr's we'll say it's also okay
        if c.value_b - c.value_a in acceptable_differences:
            continue
        # otherwise they probably are different
        return False
    # the blocks appear to be identical
    return True
@staticmethod
def _block_diff_constants(block_a, block_b):
    """Collect the differing constants across all paired sub-blocks."""
    return [
        const
        for irsb_a, irsb_b in zip(block_a.blocks, block_b.blocks)
        for const in differing_constants(irsb_a, irsb_b)
    ]
@staticmethod
def _compute_block_attributes(function):
    """
    :param function: A normalized function object.
    :returns: A dictionary of basic block addresses to tuples of attributes:
        (distance from function start, distance from function exit,
        number of subfunction calls made from the block).
    """
    distances_from_start = FunctionDiff._distances_from_function_start(function)
    distances_from_exit = FunctionDiff._distances_from_function_exit(function)
    call_sites = function.call_sites
    attributes = {}
    for block in function.graph.nodes():
        call_count = len(call_sites[block]) if block in call_sites else 0
        # 10000 is a sentinel for blocks with no recorded path to the
        # start/exit (there really shouldn't be unreachable blocks, but
        # there are for now).
        attributes[block] = (
            distances_from_start.get(block, 10000),
            distances_from_exit.get(block, 10000),
            call_count,
        )
    return attributes
@staticmethod
def _distances_from_function_start(function):
"""
:param function: A normalized Function object.
:returns: A dictionary of basic block addresses and their distance to the start of the function.
"""
return networkx.single_source_shortest_path_length(function.graph,
function.startpoint)
@staticmethod
def _distances_from_function_exit(function):
"""
:param function: A normalized Function object.
:returns: A dictionary of basic block addresses and their distance to the exit of the function.
"""
reverse_graph = function.graph.reverse()
# we aren't guaranteed to have an exit from the function so explicitly add the node
reverse_graph.add_node("start")
found_exits = False
for n in function.graph.nodes():
if len(list(function.graph.successors(n))) == 0:
reverse_graph.add_edge("start", n)
found_exits = True
# if there were no exits (a function with a while 1) let's consider the block with the highest address to
# be the exit. This isn't the most scientific way, but since this case is pretty rare it should be okay
if not found_exits:
last = max(function.graph.nodes(), key=lambda x:x.addr)
reverse_graph.add_edge("start", last)
dists = networkx.single_source_shortest_path_length(reverse_graph, "start")
# remove temp node
del dists["start"]
# correct for the added node
for n in dists:
dists[n] -= 1
return dists
def _compute_diff(self):
"""
Computes the diff of the functions and saves the result.
"""
# get the attributes for all blocks
l.debug("Computing diff of functions: %s, %s",
("%#x" % self._function_a.startpoint.addr) if self._function_a.startpoint is not None else "None",
("%#x" % self._function_b.startpoint.addr) if self._function_b.startpoint is not None else "None"
)
self.attributes_a = self._compute_block_attributes(self._function_a)
self.attributes_b = self._compute_block_attributes(self._function_b)
# get the initial matches
initial_matches = self._get_block_matches(self.attributes_a, self.attributes_b,
tiebreak_with_block_similarity=False)
# Use a queue so we process matches in the order that they are found
to_process = deque(initial_matches)
# Keep track of which matches we've already added to the queue
processed_matches = set((x, y) for (x, y) in initial_matches)
# Keep a dict of current matches, which will be updated if better matches are found
matched_a = dict()
matched_b = dict()
for (x, y) in processed_matches:
matched_a[x] = y
matched_b[y] = x
# while queue is not empty
while to_process:
(block_a, block_b) = to_process.pop()
l.debug("FunctionDiff: Processing (%#x, %#x)", block_a.addr, block_b.addr)
# we could find new matches in the successors or predecessors of functions
block_a_succ = list(self._function_a.graph.successors(block_a))
block_b_succ = list(self._function_b.graph.successors(block_b))
block_a_pred = list(self._function_a.graph.predecessors(block_a))
block_b_pred = list(self._function_b.graph.predecessors(block_b))
# propagate the difference in blocks as delta
delta = tuple((i-j) for i, j in zip(self.attributes_b[block_b], self.attributes_a[block_a]))
# get possible new matches
new_matches = []
# if the blocks are identical then the successors should most likely be matched in the same order
if self.blocks_probably_identical(block_a, block_b) and len(block_a_succ) == len(block_b_succ):
ordered_succ_a = self._get_ordered_successors(self._project_a, block_a, block_a_succ)
ordered_succ_b = self._get_ordered_successors(self._project_b, block_b, block_b_succ)
new_matches.extend(zip(ordered_succ_a, ordered_succ_b))
new_matches += self._get_block_matches(self.attributes_a, self.attributes_b, block_a_succ, block_b_succ,
delta, tiebreak_with_block_similarity=True)
new_matches += self._get_block_matches(self.attributes_a, self.attributes_b, block_a_pred, block_b_pred,
delta, tiebreak_with_block_similarity=True)
# for each of the possible new matches add it if it improves the matching
for (x, y) in new_matches:
if (x, y) not in processed_matches:
processed_matches.add((x, y))
l.debug("FunctionDiff: checking if (%#x, %#x) is better", x.addr, y.addr)
# | |
)
tmp = list(set(self._values['gather_subset']))
tmp.sort()
self._values['gather_subset'] = tmp
return self._values['gather_subset']
class BaseParameters(Parameters):
    """Common helpers shared by the *Parameters fact classes of this module."""
    @property
    def enabled(self):
        """Normalized 'yes'/'no' form of the raw ``enabled`` value."""
        return flatten_boolean(self._values['enabled'])

    @property
    def disabled(self):
        """Normalized 'yes'/'no' form of the raw ``disabled`` value."""
        return flatten_boolean(self._values['disabled'])

    def _remove_internal_keywords(self, resource):
        """Strip REST bookkeeping keys from *resource* in place."""
        for key in ('kind', 'generation', 'selfLink', 'isSubcollection', 'fullPath'):
            resource.pop(key, None)

    def to_return(self):
        """Build the dict of returnable facts for this resource."""
        result = {returnable: getattr(self, returnable) for returnable in self.returnables}
        return self._filter_params(result)
class ApplicationsParameters(BaseParameters):
    """Facts for a single application as reported by the BIG-IQ."""
    api_map = {
        'protectionMode': 'protection_mode',
        'transactionsPerSecond': 'transactions_per_second',
        'newConnections': 'new_connections',
        'responseTime': 'response_time',
        'activeAlerts': 'active_alerts',
        'badTraffic': 'bad_traffic',
        'enhancedAnalytics': 'enhanced_analytics',
        'badTrafficGrowth': 'bad_traffic_growth',
    }

    returnables = [
        'protection_mode',
        'id',
        'name',
        'status',
        'transactions_per_second',
        'connections',
        'new_connections',
        'response_time',
        'health',
        'active_alerts',
        'bad_traffic',
        'enhanced_analytics',
        'bad_traffic_growth',
    ]

    @property
    def enhanced_analytics(self):
        """'yes'/'no' form of the raw enhancedAnalytics flag."""
        return flatten_boolean(self._values['enhanced_analytics'])

    @property
    def bad_traffic_growth(self):
        """'yes'/'no' form of the raw badTrafficGrowth flag."""
        return flatten_boolean(self._values['bad_traffic_growth'])
class ApplicationsFactManager(BaseManager):
    """Collects application facts from the BIG-IQ REST API."""
    def __init__(self, *args, **kwargs):
        self.client = kwargs.get('client', None)
        self.module = kwargs.get('module', None)
        super(ApplicationsFactManager, self).__init__(**kwargs)
        self.want = ApplicationsParameters(params=self.module.params)

    def exec_module(self):
        """Return the collected facts under the ``applications`` key."""
        return dict(applications=self._exec_module())

    def _exec_module(self):
        """Read facts and return them as a name-sorted list of dicts."""
        attrs = [item.to_return() for item in self.read_facts()]
        return sorted(attrs, key=lambda k: k['name'])

    def read_facts(self):
        """Wrap every collection entry in an ApplicationsParameters object."""
        return [ApplicationsParameters(params=resource)
                for resource in self.read_collection_from_device()]

    def read_collection_from_device(self):
        """Query the AllApplicationsList report, returning its items (or [])."""
        uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # a 400 payload carries the error either as a message or raw content
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        try:
            return response['result']['items']
        except KeyError:
            return []
class ManagedDevicesParameters(BaseParameters):
    """Facts for a device managed by the BIG-IQ.

    Fix: ``https_port`` previously raised ``TypeError`` via ``int(None)``
    when the API did not report a port; it now returns None like the other
    optional facts in this class.
    """
    api_map = {
        'deviceUri': 'device_uri',
        'groupName': 'group_name',
        'httpsPort': 'https_port',
        'isClustered': 'is_clustered',
        'isLicenseExpired': 'is_license_expired',
        'isVirtual': 'is_virtual',
        'machineId': 'machine_id',
        'managementAddress': 'management_address',
        'mcpDeviceName': 'mcp_device_name',
        'restFrameworkVersion': 'rest_framework_version',
        'selfLink': 'self_link',
        'trustDomainGuid': 'trust_domain_guid',
    }
    returnables = [
        'address',
        'build',
        'device_uri',
        'edition',
        'group_name',
        'hostname',
        'https_port',
        'is_clustered',
        'is_license_expired',
        'is_virtual',
        'machine_id',
        'management_address',
        'mcp_device_name',
        'product',
        'rest_framework_version',
        'self_link',
        'slots',
        'state',
        'tags',
        'trust_domain_guid',
        'uuid',
        'version',
    ]

    @property
    def slots(self):
        """Slot dicts with ``isActive`` renamed to a flattened ``is_active``."""
        if self._values['slots'] is None:
            return None
        result = []
        for x in self._values['slots']:
            x['is_active'] = flatten_boolean(x.pop('isActive', False))
            result.append(x)
        return result

    @property
    def tags(self):
        """Tags as a name->value dict, or None when the device reports none."""
        if self._values['tags'] is None:
            return None
        result = dict((x['name'], x['value']) for x in self._values['tags'])
        return result

    @property
    def https_port(self):
        """Management HTTPS port as an int, or None when not reported."""
        if self._values['https_port'] is None:
            # previously int(None) raised TypeError and aborted fact gathering
            return None
        return int(self._values['https_port'])

    @property
    def is_clustered(self):
        return flatten_boolean(self._values['is_clustered'])

    @property
    def is_license_expired(self):
        return flatten_boolean(self._values['is_license_expired'])

    @property
    def is_virtual(self):
        return flatten_boolean(self._values['is_virtual'])
class ManagedDevicesFactManager(BaseManager):
    """Collects facts about the devices managed by the BIG-IQ."""
    def __init__(self, *args, **kwargs):
        self.client = kwargs.get('client', None)
        self.module = kwargs.get('module', None)
        super(ManagedDevicesFactManager, self).__init__(**kwargs)
        self.want = ManagedDevicesParameters(params=self.module.params)

    def exec_module(self):
        """Return the collected facts under the ``managed_devices`` key."""
        return dict(managed_devices=self._exec_module())

    def _exec_module(self):
        """Read facts and return them as a hostname-sorted list of dicts."""
        attrs = [item.to_return() for item in self.read_facts()]
        return sorted(attrs, key=lambda k: k['hostname'])

    def read_facts(self):
        """Wrap every collection entry in a ManagedDevicesParameters object."""
        return [ManagedDevicesParameters(params=resource)
                for resource in self.read_collection_from_device()]

    def read_collection_from_device(self):
        """Query the allBigIpDevices group, returning its items (or [])."""
        uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # a 400 payload carries the error either as a message or raw content
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        return response.get('items', [])
class PurchasedPoolLicensesParameters(BaseParameters):
    """Facts for a purchased-pool license.

    The ``licenseState`` sub-document is optional, so every fact derived
    from it goes through ``_license_state_value``; a missing document, a
    missing key, or a None document all yield None.  This replaces eight
    copy-pasted try/except properties and additionally tolerates
    ``license_state`` being None (the old code raised TypeError there).
    """
    api_map = {
        'baseRegKey': 'base_reg_key',
        'freeDeviceLicenses': 'free_device_licenses',
        'licenseState': 'license_state',
        'totalDeviceLicenses': 'total_device_licenses',
    }
    returnables = [
        'base_reg_key',
        'dossier',
        'free_device_licenses',
        'name',
        'state',
        'total_device_licenses',
        'uuid',
        # license_state facts
        'vendor',
        'licensed_date_time',
        'licensed_version',
        'evaluation_start_date_time',
        'evaluation_end_date_time',
        'license_end_date_time',
        'license_start_date_time',
        'registration_key',
    ]

    def _license_state_value(self, key):
        """Return *key* from the licenseState document, or None when absent."""
        try:
            return self._values['license_state'][key]
        except (KeyError, TypeError):
            # KeyError: document or key missing; TypeError: document is None
            return None

    @property
    def registration_key(self):
        return self._license_state_value('registrationKey')

    @property
    def license_start_date_time(self):
        return self._license_state_value('licenseStartDateTime')

    @property
    def license_end_date_time(self):
        return self._license_state_value('licenseEndDateTime')

    @property
    def evaluation_end_date_time(self):
        return self._license_state_value('evaluationEndDateTime')

    @property
    def evaluation_start_date_time(self):
        return self._license_state_value('evaluationStartDateTime')

    @property
    def licensed_version(self):
        return self._license_state_value('licensedVersion')

    @property
    def licensed_date_time(self):
        return self._license_state_value('licensedDateTime')

    @property
    def vendor(self):
        return self._license_state_value('vendor')
class PurchasedPoolLicensesFactManager(BaseManager):
    """Collects purchased-pool license facts from the BIG-IQ."""
    def __init__(self, *args, **kwargs):
        self.client = kwargs.get('client', None)
        self.module = kwargs.get('module', None)
        super(PurchasedPoolLicensesFactManager, self).__init__(**kwargs)
        self.want = PurchasedPoolLicensesParameters(params=self.module.params)

    def exec_module(self):
        """Return the collected facts under the ``purchased_pool_licenses`` key."""
        return dict(purchased_pool_licenses=self._exec_module())

    def _exec_module(self):
        """Read facts and return them as a name-sorted list of dicts."""
        attrs = [item.to_return() for item in self.read_facts()]
        return sorted(attrs, key=lambda k: k['name'])

    def read_facts(self):
        """Wrap every collection entry in a PurchasedPoolLicensesParameters object."""
        return [PurchasedPoolLicensesParameters(params=resource)
                for resource in self.read_collection_from_device()]

    def read_collection_from_device(self):
        """Query the purchased-pool license collection, returning its items (or [])."""
        uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/purchased-pool/licenses".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # a 400 payload carries the error either as a message or raw content
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        try:
            return response['items']
        except KeyError:
            return []
class RegkeyPoolsParameters(BaseParameters):
    """Facts for a regkey license pool; API names already match fact names."""
    api_map = {}

    returnables = [
        'name',
        'id',
        'offerings',
        'total_offerings',
    ]
class RegkeyPoolsOfferingParameters(BaseParameters):
    """Facts for a single offering inside a regkey license pool.

    As in PurchasedPoolLicensesParameters, every fact derived from the
    optional ``licenseState`` document goes through ``_license_state_value``,
    replacing eight copy-pasted try/except properties and tolerating a None
    document (the old code raised TypeError there).
    """
    api_map = {
        'regKey': 'registration_key',
        'licenseState': 'license_state',
        'status': 'state',
    }
    returnables = [
        'name',
        'dossier',
        'state',
        # license_state facts
        'licensed_date_time',
        'licensed_version',
        'evaluation_start_date_time',
        'evaluation_end_date_time',
        'license_end_date_time',
        'license_start_date_time',
        'registration_key',
    ]

    def _license_state_value(self, key):
        """Return *key* from the licenseState document, or None when absent."""
        try:
            return self._values['license_state'][key]
        except (KeyError, TypeError):
            # KeyError: document or key missing; TypeError: document is None
            return None

    @property
    def registration_key(self):
        return self._license_state_value('registrationKey')

    @property
    def license_start_date_time(self):
        return self._license_state_value('licenseStartDateTime')

    @property
    def license_end_date_time(self):
        return self._license_state_value('licenseEndDateTime')

    @property
    def evaluation_end_date_time(self):
        return self._license_state_value('evaluationEndDateTime')

    @property
    def evaluation_start_date_time(self):
        return self._license_state_value('evaluationStartDateTime')

    @property
    def licensed_version(self):
        return self._license_state_value('licensedVersion')

    @property
    def licensed_date_time(self):
        return self._license_state_value('licensedDateTime')

    # NOTE(review): 'vendor' is not listed in returnables, so this property is
    # currently unused by to_return(); kept for interface compatibility.
    @property
    def vendor(self):
        return self._license_state_value('vendor')
class RegkeyPoolsFactManager(BaseManager):
    """Collects regkey pool facts, including each pool's offerings.

    Fix: ``read_facts`` previously wrote each offering to the same
    ``offerings`` key inside the loop, so only the last offering of a pool
    survived even though ``total_offerings`` counted all of them; the
    offerings are now collected into a list.
    """
    def __init__(self, *args, **kwargs):
        self.client = kwargs.get('client', None)
        self.module = kwargs.get('module', None)
        super(RegkeyPoolsFactManager, self).__init__(**kwargs)
        self.want = RegkeyPoolsParameters(params=self.module.params)

    def exec_module(self):
        """Return the collected facts under the ``regkey_pools`` key."""
        facts = self._exec_module()
        return dict(regkey_pools=facts)

    def _exec_module(self):
        """Read facts and return them as a name-sorted list of dicts."""
        results = [item.to_return() for item in self.read_facts()]
        return sorted(results, key=lambda k: k['name'])

    def read_facts(self):
        """Build a parameter object per pool, attaching all of its offerings."""
        results = []
        for resource in self.read_collection_from_device():
            params = RegkeyPoolsParameters(params=resource)
            offerings = self.read_offerings_from_device(resource['id'])
            params.update({'total_offerings': len(offerings)})
            # collect every offering instead of overwriting the key per iteration
            offering_facts = [
                RegkeyPoolsOfferingParameters(params=offering).to_return()
                for offering in offerings
            ]
            params.update({'offerings': offering_facts})
            results.append(params)
        return results

    def read_collection_from_device(self):
        """Query the regkey license collection, returning its items (or [])."""
        uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        try:
            return response['items']
        except KeyError:
            return []

    def read_offerings_from_device(self, license):
        """Query the offerings of one pool (*license* is the pool id).

        NOTE: the parameter name shadows the ``license`` builtin but is kept
        for interface compatibility.
        """
        uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            license,
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        try:
            return response['items']
        except KeyError:
            return []
class SystemInfoParameters(BaseParameters):
api_map = {
'isSystemSetup': 'is_system_setup',
'isAdminPasswordChanged': '<PASSWORD>',
'isRootPasswordChanged': '<PASSWORD>'
}
returnables = [
'base_mac_address',
'chassis_serial',
'hardware_information',
'host_board_part_revision',
'host_board_serial',
'is_admin_password_changed',
'is_root_password_changed',
'is_system_setup',
'marketing_name',
'package_edition',
'package_version',
'platform',
'product_build',
'product_build_date',
'product_built',
'product_changelist',
'product_code',
'product_information',
'product_jobid',
'product_version',
'switch_board_part_revision',
'switch_board_serial',
'time',
'uptime',
]
@property
def is_admin_password_changed(self):
return flatten_boolean(self._values['is_admin_password_changed'])
@property
def is_root_password_changed(self):
return flatten_boolean(self._values['is_root_password_changed'])
@property
def is_system_setup(self):
if self._values['is_system_setup'] is None:
return 'no'
return flatten_boolean(self._values['is_system_setup'])
@property
def chassis_serial(self):
if self._values['system-info'] is None:
return None
# Yes, this is still called "bigip" even though this is querying the BIG-IQ
# product. This is likely due to BIG-IQ inheriting TMOS.
if 'bigipChassisSerialNum' not in self._values['system-info'][0]:
return None
return self._values['system-info'][0]['bigipChassisSerialNum']
@property
def switch_board_serial(self):
if self._values['system-info'] is None:
return None
if 'switchBoardSerialNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['switchBoardSerialNum'].strip() == '':
return None
return self._values['system-info'][0]['switchBoardSerialNum']
@property
def switch_board_part_revision(self):
if self._values['system-info'] is None:
return None
if 'switchBoardPartRevNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['switchBoardPartRevNum'].strip() == '':
return None
return self._values['system-info'][0]['switchBoardPartRevNum']
@property
def platform(self):
if self._values['system-info'] is None:
return None
return self._values['system-info'][0]['platform']
@property
def host_board_serial(self):
if self._values['system-info'] is None:
return None
if 'hostBoardSerialNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['hostBoardSerialNum'].strip() == '':
return None
return self._values['system-info'][0]['hostBoardSerialNum']
@property
def host_board_part_revision(self):
if self._values['system-info'] is None:
return None
if 'hostBoardPartRevNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['hostBoardPartRevNum'].strip() == '':
return None
return self._values['system-info'][0]['hostBoardPartRevNum']
@property
def package_edition(self):
return self._values['Edition']
@property
def package_version(self):
return 'Build {0} - {1}'.format(self._values['Build'], self._values['Date'])
@property
def product_build(self):
return self._values['Build']
@property
def product_build_date(self):
return self._values['Date']
@property
def product_built(self):
if 'version_info' not in self._values:
return None
if 'Built' in self._values['version_info']:
return int(self._values['version_info']['Built'])
@property
def product_changelist(self):
if 'version_info' not in self._values:
return None
if 'Changelist' in self._values['version_info']:
return int(self._values['version_info']['Changelist'])
@property
def product_jobid(self):
if 'version_info' not in self._values:
return None
if 'JobID' in self._values['version_info']:
return int(self._values['version_info']['JobID'])
@property
def product_code(self):
return self._values['Product']
@property
def product_version(self):
return self._values['Version']
@property
def hardware_information(self):
if self._values['hardware-version'] is None:
return None
self._transform_name_attribute(self._values['hardware-version'])
| |
porosity: Optional[float]
Porous module parameter.
"""
sponge_half_height_x0 = 0.5 * (self.x0y0[1] + self.x0y1[1])
sponge_half_height_x1 = 0.5 * (self.x1y0[1] + self.x1y1[1])
sponge_x0 = self.x0y0[0]
sponge_x1 = self.x1y0[0]
waves = None
wind_speed = np.array([0., 0., 0.])
if x_n or x_p:
self._attachAuxiliaryVariable('RelaxZones')
if x_n is True:
center = np.array([sponge_x0 - 0.5 * self.spongeLayers['x-'],
sponge_half_height_x0, 0.])
ind = self.regionIndice['x-']
flag = self.regionFlags[ind]
epsFact_solid = self.spongeLayers['x-']/2.
orientation = np.array([1., 0.])
self.zones[flag] = bc.RelaxationZone(shape=self,
zone_type='absorption',
orientation=orientation,
center=center,
waves=waves,
wind_speed=wind_speed,
epsFact_solid=epsFact_solid,
dragAlpha=dragAlpha,
dragBeta=dragBeta,
porosity=porosity)
if x_p is True:
center = np.array([sponge_x1 + 0.5 * self.spongeLayers['x+'],
sponge_half_height_x1, 0.])
ind = self.regionIndice['x+']
flag = self.regionFlags[ind]
epsFact_solid = self.spongeLayers['x+']/2.
orientation = np.array([-1., 0.])
self.zones[flag] = bc.RelaxationZone(shape=self,
zone_type='absorption',
orientation=orientation,
center=center,
waves=waves,
wind_speed=wind_speed,
epsFact_solid=epsFact_solid,
dragAlpha=dragAlpha,
dragBeta=dragBeta,
porosity=porosity)
    def setGenerationZones(self, waves=None, wind_speed=(0., 0., 0.),
                           x_n=False, x_p=False, dragAlpha=0.5/1.005e-6,
                           dragBeta=0., porosity=1., smoothing=0.):
        """
        Sets regions (x+, x-) to generation zones

        Parameters
        ----------
        waves: proteus.WaveTools
            Class instance of wave generated from proteus.WaveTools.
        wind_speed: Optional[array_like]
            Speed of wind in generation zone (default is (0., 0., 0.))
        x_n: bool
            If True, x- region is converted to generation zone.
        x_p: bool
            If True, x+ region is converted to generation zone.
        dragAlpha: Optional[float]
            Porous module parameter.
        dragBeta: Optional[float]
            Porous module parameter.
        porosity: Optional[float]
            Porous module parameter.
        smoothing: Optional[float]
            Smoothing length passed to the relaxation zones and to the
            unsteady two-phase velocity inlet boundary conditions.
        """
        # sponge-zone geometry derived from the tank corner coordinates
        sponge_half_height_x0 = 0.5 * (self.x0y0[1] + self.x0y1[1])
        sponge_half_height_x1 = 0.5 * (self.x1y0[1] + self.x1y1[1])
        sponge_x0 = self.x0y0[0]
        sponge_x1 = self.x1y0[0]
        waves = waves  # no-op rebind, kept as-is (mirrors setAbsorptionZones)
        wind_speed = np.array(wind_speed)
        if x_n or x_p:
            self._attachAuxiliaryVariable('RelaxZones')
        if x_n is True:
            # zone centered half a sponge-layer width to the left of the tank
            center = np.array([sponge_x0 - 0.5 * self.spongeLayers['x-'],
                               sponge_half_height_x0, 0.])
            ind = self.regionIndice['x-']
            flag = self.regionFlags[ind]
            epsFact_solid = self.spongeLayers['x-']/2.
            orientation = np.array([1., 0.])
            self.zones[flag] = bc.RelaxationZone(shape=self,
                                                 zone_type='generation',
                                                 orientation=orientation,
                                                 center=center,
                                                 waves=waves,
                                                 wind_speed=wind_speed,
                                                 epsFact_solid=epsFact_solid,
                                                 dragAlpha=dragAlpha,
                                                 dragBeta=dragBeta,
                                                 porosity=porosity,
                                                 smoothing=smoothing)
            # generation zones also drive the matching inlet boundary condition
            self.BC['x-'].setUnsteadyTwoPhaseVelocityInlet(wave=waves,
                                                           wind_speed=wind_speed,
                                                           smoothing=smoothing)
        if x_p is True:
            # zone centered half a sponge-layer width to the right of the tank
            center = np.array([sponge_x1 + 0.5 * self.spongeLayers['x+'],
                               sponge_half_height_x1, 0.])
            ind = self.regionIndice['x+']
            flag = self.regionFlags[ind]
            epsFact_solid = self.spongeLayers['x+']/2.
            orientation = np.array([-1., 0.])
            self.zones[flag] = bc.RelaxationZone(shape=self,
                                                 zone_type='generation',
                                                 orientation=orientation,
                                                 center=center,
                                                 waves=waves,
                                                 wind_speed=wind_speed,
                                                 epsFact_solid=epsFact_solid,
                                                 dragAlpha=dragAlpha,
                                                 dragBeta=dragBeta,
                                                 porosity=porosity,
                                                 smoothing=smoothing)
            self.BC['x+'].setUnsteadyTwoPhaseVelocityInlet(wave=waves,
                                                           wind_speed=wind_speed,
                                                           smoothing=smoothing)
class RigidBody(AuxiliaryVariables.AV_base):
"""
Auxiliary variable used to calculate attributes of an associated shape
class instance acting as a rigid body. To set a shape as a rigid body, use
shape.setRigidBody(). The class instance is created automatically when
shape.setRigidBody() has been called and after calling assembleDomain().
Parameters
----------
shape: proteus.mprans.SpatialTools.Shape_RANS
Class instance of the shape associated to the rigid body calculations.
cfl_target: Optional[float]
UNUSED (to implement), sets the maximum displacement of the body
allowed per time step.
dt_init: float
first time step of the simulation.
"""
def __init__(self, shape, cfl_target=0.9, dt_init=0.001):
self.Shape = shape
# if isinstance(shape, (Rectangle, Cuboid)):
# shape._setInertiaTensor()
self.dt_init = dt_init
self.cfl_target = 0.9
self.last_position = np.array([0., 0., 0.])
self.rotation_matrix = np.eye(3)
self.h = np.array([0., 0., 0.])
self.barycenter = np.zeros(3)
self.i_start = None # will be retrieved from setValues() of Domain
self.i_end = None # will be retrieved from setValues() of Domain
def attachModel(self, model, ar):
"""
Attaches model to auxiliary variable
"""
self.model = model
self.ar = ar
self.writer = Archiver.XdmfWriter()
self.nd = model.levelModelList[-1].nSpace_global
m = self.model.levelModelList[-1]
flagMax = max(m.mesh.elementBoundaryMaterialTypes)
# flagMin = min(m.mesh.elementBoundaryMaterialTypes)
self.nForces = flagMax+1
return self
    def calculate_init(self):
        """
        Function called at the very beginning of the simulation by proteus.

        Initializes the kinematic state (position, velocity, acceleration,
        rotation, forces/moments and their "last" copies) from the attached
        shape, sets the gravity force, and records the output file path when
        value recording is enabled.
        """
        nd = self.Shape.Domain.nd
        shape = self.Shape
        # position arrays are always length 3, even for 2D domains
        self.position = np.zeros(3)
        self.position[:] = self.Shape.barycenter.copy()
        self.last_position[:] = self.position
        self.velocity = np.zeros(3, 'd')
        self.last_velocity = np.zeros(3, 'd')
        self.acceleration = np.zeros(3, 'd')
        self.last_acceleration = np.zeros(3, 'd')
        # embed the shape's (possibly 2D) coordinate system in a 3x3 rotation
        self.rotation = np.eye(3)
        self.rotation[:nd, :nd] = shape.coords_system
        self.last_rotation = np.eye(3)
        self.last_rotation[:nd, :nd] = shape.coords_system
        self.F = np.zeros(3, 'd')
        self.M = np.zeros(3, 'd')
        self.last_F = np.zeros(3, 'd')
        self.last_M = np.zeros(3, 'd')
        self.ang = 0.
        self.barycenter = self.Shape.barycenter
        self.angvel = np.zeros(3, 'd')
        self.last_angvel = np.zeros(3, 'd')
        # gravity force: g = 9.81 m/s^2 acting along -y in 2D and -z in 3D
        if nd == 2:
            self.Fg = self.Shape.mass*np.array([0., -9.81, 0.])
        if nd == 3:
            self.Fg = self.Shape.mass*np.array([0., 0., -9.81])
        if self.Shape.record_values is True:
            self.record_file = os.path.join(Profiling.logDir,
                                            self.Shape.record_filename)
def calculate(self):
"""
Function called at each time step by proteus.
"""
# store previous values
self.last_position[:] = self.position
self.last_velocity[:] = self.velocity
self.last_acceleration[:] = self.acceleration
self.last_rotation[:] = self.rotation
self.last_angvel[:] = self.angvel
self.last_F[:] = self.F
self.last_M[:] = self.M
# for first time step
try:
dt = self.model.levelModelList[-1].dt_last
except:
dt = self.dt_init
# update forces and moments for current body/shape
i0, i1 = self.i_start, self.i_end
# get forces
F_p = self.model.levelModelList[-1].coefficients.netForces_p[i0:i1, :]
F_v = self.model.levelModelList[-1].coefficients.netForces_v[i0:i1, :]
F_g = self.Fg
F = np.sum(F_p + F_v, axis=0) + F_g
# get moments
M_t = self.model.levelModelList[-1].coefficients.netMoments[i0:i1, :]
M = np.sum(M_t, axis=0)
# store F and M with DOF constraints to body
self.F[:] = F2 = F*self.Shape.free_x
self.M[:] = M2 = M*self.Shape.free_r
# calculate new properties
self.step(dt)
# log values
t_previous = self.model.stepController.t_model_last-dt
t_current = self.model.stepController.t_model_last
h = self.h
last_pos, pos = self.last_position, self.position
last_vel, vel = self.last_velocity, self.velocity
rot = self.rotation
rot_x = atan2(rot[1, 2], rot[2, 2])
rot_y = -asin(rot[0, 2])
rot_z = atan2(rot[0, 1], rot[0, 0])
logEvent("================================================================")
logEvent("=================== Rigid Body Calculation =====================")
logEvent("================================================================")
logEvent("Name: " + `self.Shape.name`)
logEvent("================================================================")
logEvent("[proteus] t=%1.5fsec to t=%1.5fsec" % \
(t_previous, t_current))
logEvent("[proteus] dt=%1.5fsec" % (dt))
logEvent("[body] ============== Pre-calculation attributes ==============")
logEvent("[proteus] t=%1.5fsec" % (t_previous))
logEvent("[proteus] F=(% 12.7e, % 12.7e, % 12.7e)" % (F[0], F[1], F[2]))
logEvent("[proteus] F*DOF=(% 12.7e, % 12.7e, % 12.7e)" % (F2[0], F2[1], F2[2]))
logEvent("[proteus] M=(% 12.7e, % 12.7e, % 12.7e)" % (M[0], M[1], M[2]))
logEvent("[proteus] M*DOF=(% 12.7e, % 12.7e, % 12.7e)" % (M2[0], M2[1], M2[2]))
logEvent("[body] pos=(% 12.7e, % 12.7e, % 12.7e)" % \
(last_pos[0], last_pos[1], last_pos[2]))
logEvent("[body] vel=(% 12.7e, % 12.7e, % 12.7e)" % \
(last_vel[0], last_vel[1], last_vel[2]))
logEvent("[body] ===============Post-calculation attributes ==============")
logEvent("[body] t=%1.5fsec" % (t_current))
logEvent("[body] h=(% 12.7e, % 12.7e, % 12.7e)" % (h[0], h[1], h[2]))
logEvent("[body] pos=(% 12.7e, % 12.7e, % 12.7e)" % \
(pos[0], pos[1], pos[2]))
logEvent("[body] vel=(% 12.7e, % 12.7e, % 12.7e)" % \
(vel[0], vel[1], vel[2]))
logEvent("[body] rot=(% 12.7e, % 12.7e, % 12.7e)" % \
(rot_x, rot_y, rot_z))
logEvent("================================================================")
def step(self, dt):
"""
Step for rigid body calculations in Python
Parameters
----------
dt: float
time step
"""
nd = self.Shape.Domain.nd
# acceleration from force
self.acceleration = self.F/self.Shape.mass
# angular acceleration from moment
if sum(self.M) != 0:
self.inertia = self.Shape.getInertia(self.M, self.Shape.barycenter)
assert self.inertia != 0, 'Zero inertia: inertia tensor (It)' \
'was not set correctly!'
ang_acc = self.M[:]/self.inertia
else:
self.inertia = None
ang_acc = np.array([0., 0., 0.])
# substeps for smoother motion between timesteps
ang_disp = 0
substeps = 20
dt_sub = dt/float(substeps)
self.h[:] = np.zeros(3)
for i in range(substeps):
# displacement
self.velocity += self.acceleration*dt_sub
self.h += self.velocity*dt_sub
# rotation
self.angvel += ang_acc*dt_sub
ang_disp += self.angvel*dt_sub
# translate
self.Shape.translate(self.h[:nd])
# rotate
self.ang = np.linalg.norm(ang_disp)
if nd == 2 and self.angvel[2] < 0:
self.ang = -self.ang
if self.ang != 0.:
self.Shape.rotate(self.ang, self.angvel, self.Shape.barycenter)
self.rotation[:nd, :nd] = self.Shape.coords_system
self.rotation_matrix[:] = np.dot(np.linalg.inv(self.last_rotation),
self.rotation)
else:
self.rotation_matrix[:] = np.eye(3)
self.barycenter[:] = self.Shape.barycenter
self.position[:] = self.Shape.barycenter
if self.Shape.record_values is True:
self.recordValues()
def recordValues(self):
"""
Records values of rigid body attributes at each time step in a csv file.
"""
comm = Comm.get()
if comm.isMaster():
t_last = self.model.stepController.t_model_last
dt_last = self.model.levelModelList[-1].dt_last
values_towrite = []
t = t_last-dt_last
if t == 0:
headers = []
if self.Shape.record_dict['time'] is True:
headers += ['t']
if self.Shape.record_dict['pos'] is True:
headers += ['x', 'y', 'z']
if self.Shape.record_dict['rot'] is True:
headers += ['rx', 'ry', 'rz']
if self.Shape.record_dict['F'] is True:
headers += ['Fx', 'Fy', 'Fz']
if self.Shape.record_dict['M'] is True:
headers += ['Mx', 'My', 'Mz']
if self.Shape.record_dict['inertia'] is True:
headers += ['inertia']
if self.Shape.record_dict['vel'] is True:
headers += ['vel_x', 'vel_y', 'vel_z']
if self.Shape.record_dict['acc'] is True:
headers += ['acc_x', 'acc_y', 'acc_z']
with open(self.record_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(headers)
if self.Shape.record_dict['time'] is True:
t = t_last-dt_last
values_towrite += [t]
if self.Shape.record_dict['pos'] is True:
x, y, z = self.last_position
values_towrite += [x, y, z]
if self.Shape.record_dict['rot'] is True:
rot = self.last_rotation
rx = atan2(rot[1, 2], rot[2, 2])
ry = -asin(rot[0, 2])
rz = atan2(rot[0, 1], rot[0, 0])
values_towrite += [rx, ry, rz]
if self.Shape.record_dict['F'] is True:
Fx, Fy, Fz = self.F
values_towrite += [Fx, Fy, | |
# calculate
used_program_storage = 0
used_dynamic_memory = 0
for section, size, address in xtensa.sections(self.elffile):
if size == 0 or address == 0:
continue
for program_storage_section in self.program_storage_sections:
if section.startswith(program_storage_section):
log.info('FLASH : 0x{:08x} 0x{:08x} {}'.format(address, size, section))
used_program_storage += size
for dynamic_memory_section in self.dynamic_memory_sections:
if section.startswith(dynamic_memory_section):
log.info('MEMORY: 0x{:08x} 0x{:08x} {}'.format(address, size, section))
used_dynamic_memory += size
# stats
percent_program_storage = int((used_program_storage / self.program_storage) * 100)
percent_dynamic_memory = int((used_dynamic_memory / self.dynamic_memory) * 100)
remaining_dynamic_memory = 0
if used_dynamic_memory < self.dynamic_memory:
remaining_dynamic_memory = self.dynamic_memory - used_dynamic_memory
# present
log.info('Sketch uses {} bytes ({}%) of program storage space. Maximum is {} bytes.'.format(
used_program_storage, percent_program_storage, self.program_storage))
log.info('Global variables use {} bytes ({}%) of dynamic memory, leaving {} bytes for local variables. Maximum is {} bytes.'.format(
used_dynamic_memory, percent_dynamic_memory, remaining_dynamic_memory, self.dynamic_memory))
def gdb_info_pc(self, pc):
    """Map a program-counter address to source info using GDB.

    Runs ``gdb --batch`` against the target ELF with ``l *0x<pc>`` and
    parses the two-line listing output.

    Args:
        pc: integer program-counter value to resolve.

    Returns:
        Tuple ``(method, path, lineno, code)``; any field that cannot be
        parsed from the GDB output is returned as None.

    Note: a malformed line can make ``split('(/')`` raise ValueError,
    which callers are expected to handle.
    """
    # Under Windows the xtensa toolchain lives inside WSL, so translate
    # paths and invoke through bash below.
    if os.name == 'nt':
        args = [wsl.path_in_wsl(xtensa.gdb), '--batch', wsl.path_in_wsl(self.elffile)]
    else:
        args = [xtensa.gdb, '--batch', self.elffile]
    args.extend(['-ex', 'set listsize 1'])
    args.extend(['-ex', 'l *0x{:08x}'.format(pc)])
    args.extend(['-ex', 'q'])
    if os.name == 'nt':
        args = ['bash', '-c', ' '.join(["'{}'".format(arg) for arg in args])]
    p = subprocess.Popen(args, stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # The appended newlines guarantee the line0/line1 indexing below
    # never raises even on empty output.
    stdout = stdout.decode('ascii', errors='replace').strip() + '\n\n'
    line0 = stdout.split('\n')[0].strip()
    line1 = stdout.split('\n')[1].strip()
    method = None
    path = None
    lineno = None
    code = None
    # Expected line0 form: "0x<addr> is in <method> (/<path>:<lineno>)."
    if line0.startswith('0x') and line0.count('is in ') > 0:
        method_file_lineno = 'is in '.join(line0.split('is in ')[1:])
        if method_file_lineno.find('(/') == -1:
            # No source location available; only a symbol name.
            method = method_file_lineno.strip()
        else:
            if method_file_lineno.find(')') != -1:
                method_file_lineno = method_file_lineno.split(')')[0]
            # NOTE(review): splitting on '(/' strips the leading '/' from
            # the reported path — confirm this is intended.
            method, path_lineno = method_file_lineno.split('(/')
            method = method.strip()
            if path_lineno.find(':') != -1:
                path, lineno = path_lineno.split(':')
                lineno = int(lineno.strip(), 0)
            else:
                path = path_lineno
            path = path.strip()
    # line1 is "<lineno> <source text>" when a listing was produced.
    if line1.strip() != '' and line1.count(' ') > 0:
        code = ' '.join(line1.split(' ')[1:]).strip()
    return method, path, lineno, code
def decode_and_log_backtrace(self, line):
    """Decode an ESP32 'Backtrace:' line, resolving each PC:SP frame
    to source information via GDB and logging one line per frame."""
    # Collapse tabs and runs of spaces into single separators.
    normalized = ' '.join(line.split())
    with log('esp32'), log('backtrace'):
        for frame in normalized.split(' '):
            # A frame looks like "0x<pc>:0x<sp>"; skip anything else.
            if frame.count('0x') != 2:
                continue
            assert frame.count(':') == 1
            pc_text, sp_text = frame.split(':')
            pc = int(pc_text, 16)
            sp = int(sp_text, 16)
            try:
                method, path, lineno, code = self.gdb_info_pc(pc)
            except ValueError:
                method = path = lineno = code = None
            log.info('0x{:08x} {} at {}:{} "{}"'.format(
                pc, method or '??', path or '??', lineno or '??', code or '??'))
class Atmega328p(object):
    """Build and flash support for the ATmega328p target."""

    NAME = "atmega328p"

    def __init__(self):
        self.out = os.path.join(out, 'atmega328p')
        self.baudrate = 115200
        self.device = 'atmega328p'
        self.hexfile = os.path.join(self.out, 'badge.hex')

    @staticmethod
    def _environ():
        """Copy of os.environ with the Arduino toolchain on PATH."""
        env = os.environ.copy()
        env['PATH'] = '{}:{}'.format(arduino.bin, env['PATH'])
        env['ARDUINO_SDK_PATH'] = arduino.sdk
        return env

    @property
    def comport(self):
        """Locate the CH340 USB-serial adapter; raise if absent."""
        with log('atmega328p'):
            log.info('Finding atmega328p serial device...')
            try:
                found = serial.find('usb-serial ch340')
            except SerialPortNotFoundError:
                found = None
            if not found:
                raise SerialPortNotFoundError('atmega328p')
            return found

    def clean(self):
        """Delete this target's build directory if present."""
        with log('atmega328p'):
            if os.path.exists(self.out):
                log.info(self.out)
                shutil.rmtree(self.out)

    def generate(self, **kwargs):
        """Run CMake generation with the Arduino toolchain file."""
        with log('atmega328p'):
            os.makedirs(self.out, exist_ok=True)
            cmake.generate(root,
                           self.out,
                           generator='Unix Makefiles',
                           toolchain=arduino.toolchain_cmake,
                           environ=self._environ(),
                           **kwargs)

    def build(self, cores):
        """Build with make, generating first if never generated."""
        if not os.path.exists(self.out):
            with log("autogenerate"):
                # `debug` is `False` otherwise we'd have generated already
                self.generate()
        with log('atmega328p'):
            make.build(self.out, environ=self._environ(), cores=cores)

    def flash(self, programmer, comport=None, baudrate=None):
        """Flash the built hex image via avrdude."""
        with log('atmega328p'):
            comport = comport or self.comport
            baudrate = baudrate or self.baudrate
            log.info('Flashing...please wait...')
            avrdude.flash(path=self.hexfile,
                          comport=comport, baudrate=baudrate,
                          device=self.device, programmer=programmer)
class SAMD21(object):
    """Build and flash support for the SAMD21 (MKRZero) I/O coprocessor."""

    NAME = "samd21"
    _DEFAULT_PORTS = {
        "Linux": "/dev/ttyACM0",
    }
    DEFAULT_PORT = _DEFAULT_PORTS.get(platform.system()) if not wsl.is_wsl() else None

    def __init__(self):
        self.out = os.path.join(out, 'samd21')
        self.baudrate = 115200
        self.binfile = os.path.join(self.out, 'io_coprocessor.bin')

    @staticmethod
    def _environ():
        """Copy of os.environ with the ARM toolchain and SDK paths set."""
        env = os.environ.copy()
        env['PATH'] = '{}:{}'.format(arduino_arm_tools.bin, env['PATH'])
        env['ARDUINO_SDK_PATH'] = arduino.sdk
        env['ARDUINO_SAMD_PATH'] = arduino_samd.path
        env['CMSIS_ATMEL_PATH'] = cmsis_atmel.path
        env['CMSIS_PATH'] = cmsis.path
        return env

    def _find_port(self, description, error_name):
        """Locate a serial port by description; raise if absent."""
        serial.clear_cache()
        with log('samd21'):
            log.info('Finding samd21 serial device...')
            try:
                found = serial.find(description)
            except SerialPortNotFoundError:
                found = None
            if not found:
                raise SerialPortNotFoundError(error_name)
            return found

    @property
    def comport_bootloader(self):
        """Serial port exposed by the MKRZero bootloader."""
        return self._find_port('MKRZero bootloader', 'samd21 (bootloader serial port)')

    @property
    def comport(self):
        """Serial port exposed by the running application."""
        if wsl.is_wsl():
            # Give WSL a moment to re-enumerate the USB serial device.
            time.sleep(2)
        return self._find_port('MKRZero', 'samd21')

    def clean(self):
        """Delete this target's build directory if present."""
        with log('samd21'):
            if os.path.exists(self.out):
                log.info(self.out)
                shutil.rmtree(self.out)

    def generate(self, **kwargs):
        """Run CMake generation with the SAMD21 toolchain file."""
        with log('samd21'):
            os.makedirs(self.out, exist_ok=True)
            cmake.generate(root,
                           self.out,
                           generator='Unix Makefiles',
                           toolchain=arduino.toolchain_samd21_cmake,
                           environ=self._environ(),
                           **kwargs)

    def build(self, cores):
        """Build with make, generating first if never generated."""
        if not os.path.exists(self.out):
            with log("autogenerate"):
                # `debug` is `False` otherwise we'd have generated already
                self.generate()
        with log('samd21'):
            make.build(self.out, environ=self._environ(), cores=cores)

    def flash(self, comport=None):
        """Flash the built binary through the bootloader via bossac."""
        with log('samd21'):
            comport = comport or self.comport_bootloader
            log.info('Flashing...please wait...')
            bossac.flash(path=self.binfile, comport=comport)
class HostLinux(object):
    """Build support for the native Linux host binary."""

    NAME = "host"

    def __init__(self):
        self.out = os.path.join(out, 'host')
        self.binfile = os.path.join(self.out, 'src', 'badge', 'badge')

    def clean(self):
        """Delete this target's build directory if present."""
        with log('host'):
            if os.path.exists(self.out):
                log.info(self.out)
                shutil.rmtree(self.out)

    def generate(self, **kwargs):
        """Run CMake generation with the default host toolchain."""
        with log('host'):
            os.makedirs(self.out, exist_ok=True)
            cmake.generate(root,
                           self.out,
                           generator='Unix Makefiles',
                           environ=os.environ.copy(),
                           **kwargs)

    def build(self, cores):
        """Build with make, generating first if never generated."""
        if not os.path.exists(self.out):
            with log("autogenerate"):
                # `debug` is `False` otherwise we'd have generated already
                self.generate()
        with log('host'):
            make.build(self.out, environ=os.environ.copy(), cores=cores)
def flatten(maybe_iter):
    """Yield the leaves of an arbitrarily nested iterable.

    Non-iterable values are yielded as-is. Strings and bytes are treated
    as leaves rather than iterables: iterating a one-character string
    yields that same string again, so recursing into them would never
    terminate (RecursionError in the original implementation).

    Args:
        maybe_iter: a single value, or a nested iterable of values.

    Yields:
        Each leaf value in depth-first order.
    """
    if isinstance(maybe_iter, (str, bytes)):
        yield maybe_iter
    elif isinstance(maybe_iter, collections.abc.Iterable):
        for element in maybe_iter:
            yield from flatten(element)
    else:
        yield maybe_iter
# Registry of build targets keyed by name. Values start out as single
# target objects or (possibly nested) tuples and are flattened to flat
# tuples of target objects below.
TARGETS = {
    # XXX It's not clear if this still works
    #Atmega328p.NAME: Atmega328p(),
    Esp32.NAME: Esp32(),
    SAMD21.NAME: SAMD21(),
    HostLinux.NAME: HostLinux(),
}
# Composite targets reuse the instances registered above; "2020-all"
# temporarily nests the "2020-badge" tuple, which the flatten() pass
# below unrolls.
TARGETS["2020-badge"] = tuple(TARGETS[k] for k in (Esp32.NAME, SAMD21.NAME))
TARGETS["2020-all"] = tuple(TARGETS[k] for k in ("2020-badge", HostLinux.NAME))
TARGET_ALL = "all"
TARGETS[TARGET_ALL] = TARGETS["2020-all"]
# Ensure all values are naively iterable
for k, v in TARGETS.items():
    TARGETS[k] = tuple(flatten(v))
def download_tools(args):
    """Download every external toolchain the build requires."""
    for tool in (arduino, xtensa, arduino_arm_tools, cmsis_atmel, cmsis, bossac):
        tool.download()
def check_tools(args):
    """Verify that every required build tool is available."""
    for tool in (cmake, make, wget, tar, arduino,
                 xtensa, arduino_arm_tools, cmsis_atmel, cmsis, bossac):
        tool.check()
def download_libs(args):
    """Fetch third-party libraries: git submodules, ESP SDKs and LFS blobs."""
    if git.check():
        git.init_submodules(external, lib_path)
    for sdk in (esp_mdf, esp_idf):
        sdk.download()
    # This is a bit cheeky but we'll also pull down LFS files here
    lfs.checkout()
def clean(args):
    """Remove build output for the selected target(s).

    Cleaning 'all' additionally removes the shared output directory,
    which is then recreated empty.
    """
    for target in TARGETS[args.target]:
        target.clean()
    if args.target == TARGET_ALL and os.path.exists(out):
        log.info(out)
        shutil.rmtree(out)
    os.makedirs(out, exist_ok=True)
    assert os.path.exists(out)
def generate(args):
    """Run CMake generation for every selected target."""
    for target in TARGETS[args.target]:
        target.generate(debug=args.debug)
def build(args):
    """Build the selected target(s) in worker processes.

    With -j1 each target is built to completion before the next starts;
    with -j>1 all targets build concurrently. Exits with status 1 if any
    worker fails, otherwise prints sketch size info where supported.
    """
    workers = [
        Process(
            name=f"{target.NAME}-build",
            target=target.build, args=(args.j, ),
        )
        for target in TARGETS[args.target]
    ]
    serial_build = args.j == 1
    for worker in workers:
        worker.start()
        if serial_build:
            worker.join()
    if not serial_build:
        for worker in workers:
            worker.join()
    failed = [worker for worker in workers if worker.exitcode]
    if failed:
        log.error("Build tasks failed")
        for worker in failed:
            log.error("%r failed", worker.name)
        sys.exit(1)
    for target in TARGETS[args.target]:
        try:
            target.sketch_info()
        except AttributeError:
            # Not every target reports sketch size.
            pass
def flash(args):
    """Flash each selected target using its per-target --<name>-port value."""
    for target in TARGETS[args.target]:
        port_attr = f"{target.NAME}_port"
        try:
            target.flash(comport=getattr(args, port_attr))
        except AttributeError:
            # Target has no flash() or no port argument was defined for it.
            pass
def monitor(args):
    """Monitor output from the selected target(s).

    Three modes:
      * host target: run the built binary directly and stream its output;
      * under WSL: re-invoke this script with the Windows Python so the
        serial ports (visible only on the Windows side) can be opened;
      * otherwise: open each target's serial port and interleave log
        lines, decoding ESP32 backtraces when they appear.
    """
    # The host build gets monitored standalone
    if args.target == HostLinux.NAME:
        with log(HostLinux.NAME):
            log.command(TARGETS[args.target][0].binfile)
        return
    # We need to run a new process under WSL on Windows
    if wsl.is_wsl():
        wsl_args = ['py.exe', '-3', wsl.path_on_windows(__file__)]
        wsl_args.extend(['--monitor'])
        wsl_args.extend(['--target', args.target])
        # Resolve each target's port now so the Windows process does not
        # need the (Linux-side) port discovery logic.
        for target_obj in TARGETS[args.target]:
            comport_arg = f"{target_obj.NAME}_port"
            if comport_arg in args:
                port = getattr(args, comport_arg)
                if not port:
                    port = target_obj.comport
                wsl_args.extend([
                    f"--{target_obj.NAME}-port", port,
                ])
        subprocess.call(wsl_args)
        return
    # Otherwise we build up the serial port objects we need and monitor them
    serial_objs = {}
    for target_obj in TARGETS[args.target]:
        # Skip targets where we have neither a COM port property or an argument
        # to set the COM port
        comport_arg = f"{target_obj.NAME}_port"
        try:
            comport = getattr(args, comport_arg)
        except AttributeError:
            try:
                comport = target_obj.comport
            except AttributeError:
                continue
        # The SAMD21 often takes a bit to come back after being flashed so
        # we'll delay if we get an `ENOENT` or `EPERM`
        try:
            serial_obj = serial(comport, target_obj.baudrate)
        except serialutil.SerialException as exc:
            if (
                isinstance(exc.args[0], int) and
                exc.args[0] in (errno.ENOENT, errno.EPERM)
            ):
                log.warning(
                    "Pausing to wait for %r serial %r",
                    target_obj.NAME, comport,
                )
                time.sleep(2)
                # Allow any exception to bubble up this time
                serial_obj = serial(comport, target_obj.baudrate)
            else:
                raise exc
        serial_objs[target_obj.NAME] = serial_obj
    # Poll every port round-robin, logging complete lines as they arrive.
    with contextlib.ExitStack() as ctx:
        serial_ctxs = dict()
        for name, serial_obj in serial_objs.items():
            serial_ctxs[name] = ctx.enter_context(serial_obj)
        while True:
            for n, s in serial_ctxs.items():
                line = s.readline(block=False)
                if line:
                    line = line.decode('ascii', errors='replace').strip()
                    with log(n):
                        log.info(line)
                    # Special handling for ESP32 backtraces
                    # XXX: This should avoid touching the target directly
                    if line.startswith('Backtrace:'):
                        (esp32, ) = TARGETS[Esp32.NAME]
                        esp32.decode_and_log_backtrace(line)
                else:
                    # Don't churn the CPU so much...
                    time.sleep(0.01)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target',
help="Target for build",
choices=TARGETS.keys(),
default='all')
parser.add_argument('--download',
help="Download tools and libs for build environment",
action='store_true',
default=False)
parser.add_argument('--check',
help="Check for required tools",
action='store_true',
default=False)
parser.add_argument('--clean',
help="Clean build | |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: IOBinding.py
import os
import types
import sys
import codecs
import tempfile
import tkFileDialog
import tkMessageBox
import re
from Tkinter import *
from SimpleDialog import SimpleDialog
from idlelib.configHandler import idleConf
try:
from codecs import BOM_UTF8
except ImportError:
BOM_UTF8 = '\ufeff'
try:
import locale
locale.setlocale(locale.LC_CTYPE, '')
except (ImportError, locale.Error):
pass
# Determine the default text encoding for this platform/locale, falling
# back to 'ascii' when nothing usable can be discovered.
filesystemencoding = sys.getfilesystemencoding()
encoding = 'ascii'
if sys.platform == 'win32':
    try:
        encoding = locale.getdefaultlocale()[1]
        # Guard against a None/empty result before codecs.lookup, which
        # would otherwise raise TypeError (the original had no guard here
        # and used the buggy identity test `encoding is ''` elsewhere).
        if not encoding:
            encoding = 'ascii'
        codecs.lookup(encoding)
    except LookupError:
        pass
else:
    try:
        # CODESET is POSIX-only; NameError/AttributeError cover platforms
        # where locale lacks nl_langinfo or CODESET.
        encoding = locale.nl_langinfo(locale.CODESET)
        if not encoding:
            encoding = 'ascii'
        codecs.lookup(encoding)
    except (NameError, AttributeError, LookupError):
        try:
            encoding = locale.getdefaultlocale()[1]
            if not encoding:
                encoding = 'ascii'
            codecs.lookup(encoding)
        except (ValueError, LookupError):
            pass
encoding = encoding.lower()
# PEP 263: matches an encoding declaration such as "coding: utf-8" or
# "coding=utf-8"; group 1 captures the encoding name.
coding_re = re.compile('coding[:=]\\s*([-\\w_.]+)')
class EncodingMessage(SimpleDialog):
    """Inform user that an encoding declaration is needed.

    Modal dialog offering two outcomes (see encode()): result 0 means
    "save as the suggested encoding", result 1 means "let me edit the
    file to add the declaration myself".
    """

    def __init__(self, master, enc):
        self.should_edit = False
        self.root = top = Toplevel(master)
        top.bind('<Return>', self.return_event)
        top.bind('<Escape>', self.do_ok)
        top.protocol('WM_DELETE_WINDOW', self.wm_delete_window)
        top.wm_title('I/O Warning')
        top.wm_iconname('I/O Warning')
        self.top = top
        l1 = Label(top, text='Non-ASCII found, yet no encoding declared. Add a line like')
        l1.pack(side=TOP, anchor=W)
        # Editable entry so the user can copy the suggested declaration.
        l2 = Entry(top, font='courier')
        l2.insert(0, '# -*- coding: %s -*-' % enc)
        l2.pack(side=TOP, anchor=W, fill=X)
        l3 = Label(top, text='to your file\nChoose OK to save this file as %s\nEdit your general options to silence this warning' % enc)
        l3.pack(side=TOP, anchor=W)
        buttons = Frame(top)
        buttons.pack(side=TOP, fill=X)
        # SimpleDialog bookkeeping: index of the default/cancel button.
        self.default = self.cancel = 0
        b1 = Button(buttons, text='Ok', default='active', command=self.do_ok)
        b1.pack(side=LEFT, fill=BOTH, expand=1)
        b2 = Button(buttons, text='Edit my file', command=self.do_edit)
        b2.pack(side=LEFT, fill=BOTH, expand=1)
        self._set_transient(master)

    def do_ok(self):
        # Result 0: save using the suggested encoding.
        self.done(0)

    def do_edit(self):
        # Result 1: user wants to edit the file instead.
        self.done(1)
def coding_spec(str):
    """Return the encoding declaration according to PEP 263.

    Only the first two lines of the source are searched; returns None
    when no declaration is present.

    Raise LookupError if the encoding is declared but unknown.
    """
    # NOTE(review): the parameter shadows the builtin `str`; kept as-is
    # for interface compatibility with existing callers.
    str = '\n'.join(str.split('\n')[:2])
    match = coding_re.search(str)
    if not match:
        return None
    name = match.group(1)
    # `codecs` is imported at module level; the redundant local import
    # in the original has been dropped.
    try:
        codecs.lookup(name)
    except LookupError:
        # Parenthesized raise form is valid on both Python 2 and 3;
        # the original `raise LookupError, msg` is Python-2-only syntax.
        raise LookupError('Unknown encoding ' + name)
    return name
class IOBinding():
    """File I/O glue for an IDLE editor window (Python 2 code).

    Binds the open/save/save-as/save-copy/print virtual events on the
    editor's Text widget and implements file loading, saving, PEP 263
    encoding detection, line-ending preservation and printing.
    """

    def __init__(self, editwin):
        self.editwin = editwin
        self.text = editwin.text
        # Keep the bind ids so close() can unbind exactly these handlers.
        self.__id_open = self.text.bind('<<open-window-from-file>>', self.open)
        self.__id_save = self.text.bind('<<save-window>>', self.save)
        self.__id_saveas = self.text.bind('<<save-window-as-file>>', self.save_as)
        self.__id_savecopy = self.text.bind('<<save-copy-of-window-as-file>>', self.save_a_copy)
        self.fileencoding = None
        self.__id_print = self.text.bind('<<print-window>>', self.print_window)
        return

    def close(self):
        """Unbind all handlers and break reference cycles."""
        self.text.unbind('<<open-window-from-file>>', self.__id_open)
        self.text.unbind('<<save-window>>', self.__id_save)
        self.text.unbind('<<save-window-as-file>>', self.__id_saveas)
        self.text.unbind('<<save-copy-of-window-as-file>>', self.__id_savecopy)
        self.text.unbind('<<print-window>>', self.__id_print)
        self.editwin = None
        self.text = None
        self.filename_change_hook = None
        return

    def get_saved(self):
        # Delegate saved-state tracking to the editor window.
        return self.editwin.get_saved()

    def set_saved(self, flag):
        self.editwin.set_saved(flag)

    def reset_undo(self):
        self.editwin.reset_undo()

    # Optional callback invoked whenever the filename changes.
    filename_change_hook = None

    def set_filename_change_hook(self, hook):
        self.filename_change_hook = hook

    # Current file name, or None; dirname is set instead when a
    # directory was chosen.
    filename = None
    dirname = None

    def set_filename(self, filename):
        """Record the current file (or directory) and mark the buffer saved."""
        if filename and os.path.isdir(filename):
            self.filename = None
            self.dirname = filename
        else:
            self.filename = filename
            self.dirname = None
            self.set_saved(1)
            if self.filename_change_hook:
                self.filename_change_hook()
        return

    def open(self, event=None, editFile=None):
        """Open a file, either into this window or via the file list."""
        if self.editwin.flist:
            if not editFile:
                filename = self.askopenfile()
            else:
                filename = editFile
            if filename:
                # An interactive window (has an interpreter) must not be
                # reused for file contents.
                try:
                    interp = self.editwin.interp
                except AttributeError:
                    interp = None
                if not self.filename and self.get_saved() and not interp:
                    self.editwin.flist.open(filename, self.loadfile)
                else:
                    self.editwin.flist.open(filename)
            else:
                self.text.focus_set()
            return 'break'
        else:
            # Code path for use outside of a file list.
            if self.get_saved():
                reply = self.maybesave()
                if reply == 'cancel':
                    self.text.focus_set()
                    return 'break'
            if not editFile:
                filename = self.askopenfile()
            else:
                filename = editFile
            if filename:
                self.loadfile(filename)
            else:
                self.text.focus_set()
            return 'break'

    # Regular expression recognizing any of the three EOL conventions.
    eol = '(\\r\\n)|\\n|\\r'
    eol_re = re.compile(eol)
    eol_convention = os.linesep

    def loadfile(self, filename):
        """Read `filename` into the text widget; return True on success."""
        try:
            f = open(filename, 'rb')
            chars = f.read()
            f.close()
        except IOError as msg:
            tkMessageBox.showerror('I/O Error', str(msg), master=self.text)
            return False
        chars = self.decode(chars)
        # Remember the file's first line ending so saves can restore it.
        firsteol = self.eol_re.search(chars)
        if firsteol:
            self.eol_convention = firsteol.group(0)
            if isinstance(self.eol_convention, unicode):
                self.eol_convention = self.eol_convention.encode('ascii')
        # Normalize all line endings to '\n' for the widget.
        chars = self.eol_re.sub('\\n', chars)
        self.text.delete('1.0', 'end')
        self.set_filename(None)
        self.text.insert('1.0', chars)
        self.reset_undo()
        self.set_filename(filename)
        self.text.mark_set('insert', '1.0')
        self.text.see('insert')
        self.updaterecentfileslist(filename)
        return True

    def decode(self, chars):
        """Create a Unicode string from raw bytes.

        Tries, in order: UTF-8 BOM, a PEP 263 declaration, ASCII, then
        the locale encoding. If all fail the raw bytes are returned and
        Tcl is left to try its best.
        """
        if chars.startswith(BOM_UTF8):
            try:
                chars = chars[3:].decode('utf-8')
            except UnicodeError:
                return chars
            self.fileencoding = BOM_UTF8
            return chars
        try:
            enc = coding_spec(chars)
        except LookupError as name:
            tkMessageBox.showerror(title='Error loading the file', message="The encoding '%s' is not known to this Python installation. The file may not display correctly" % name, master=self.text)
            enc = None
        if enc:
            try:
                return unicode(chars, enc)
            except UnicodeError:
                pass
        try:
            return unicode(chars, 'ascii')
        except UnicodeError:
            pass
        try:
            chars = unicode(chars, encoding)
            self.fileencoding = encoding
        except UnicodeError:
            pass
        return chars

    def maybesave(self):
        """Ask whether to save before closing; return 'yes'/'no'/'cancel'."""
        if self.get_saved():
            return 'yes'
        else:
            message = 'Do you want to save %s before closing?' % (self.filename or 'this untitled document')
            confirm = tkMessageBox.askyesnocancel(title='Save On Close', message=message, default=tkMessageBox.YES, master=self.text)
            if confirm:
                reply = 'yes'
                self.save(None)
                # If the save was cancelled/failed, treat as cancel.
                if not self.get_saved():
                    reply = 'cancel'
            elif confirm is None:
                reply = 'cancel'
            else:
                reply = 'no'
            self.text.focus_set()
            return reply

    def save(self, event):
        """Save to the current filename, prompting if there is none."""
        if not self.filename:
            self.save_as(event)
        elif self.writefile(self.filename):
            self.set_saved(True)
            try:
                self.editwin.store_file_breaks()
            except AttributeError:
                pass
        self.text.focus_set()
        return 'break'

    def save_as(self, event):
        """Prompt for a filename and save, making it the current file."""
        filename = self.asksavefile()
        if filename:
            if self.writefile(filename):
                self.set_filename(filename)
                self.set_saved(1)
                try:
                    self.editwin.store_file_breaks()
                except AttributeError:
                    pass
            self.text.focus_set()
            self.updaterecentfileslist(filename)
        return 'break'

    def save_a_copy(self, event):
        """Save to a chosen filename without changing the current file."""
        filename = self.asksavefile()
        if filename:
            self.writefile(filename)
            self.text.focus_set()
            self.updaterecentfileslist(filename)
        return 'break'

    def writefile(self, filename):
        """Write the buffer to `filename`; return True on success."""
        self.fixlastline()
        chars = self.encode(self.text.get('1.0', 'end-1c'))
        # Restore the file's original line-ending convention.
        if self.eol_convention != '\n':
            chars = chars.replace('\n', self.eol_convention)
        try:
            f = open(filename, 'wb')
            f.write(chars)
            f.flush()
            f.close()
            return True
        except IOError as msg:
            tkMessageBox.showerror('I/O Error', str(msg), master=self.text)
            return False

    def encode(self, chars):
        """Encode the buffer for writing, choosing an output encoding.

        Preference order: ASCII, declared PEP 263 encoding, previous file
        encoding, configured encoding, locale encoding; falls back to
        BOM-prefixed UTF-8, possibly after asking the user to add a
        declaration line.
        """
        if isinstance(chars, types.StringType):
            # Already a byte string: nothing to do.
            return chars
        else:
            try:
                return chars.encode('ascii')
            except UnicodeError:
                pass
            try:
                enc = coding_spec(chars)
                failed = None
            except LookupError as msg:
                failed = msg
                enc = None
            if enc:
                try:
                    return chars.encode(enc)
                except UnicodeError:
                    failed = "Invalid encoding '%s'" % enc
            if failed:
                tkMessageBox.showerror('I/O Error', '%s. Saving as UTF-8' % failed, master=self.text)
            if self.fileencoding == BOM_UTF8 or failed:
                return BOM_UTF8 + chars.encode('utf-8')
            if self.fileencoding:
                try:
                    return chars.encode(self.fileencoding)
                except UnicodeError:
                    tkMessageBox.showerror('I/O Error', "Cannot save this as '%s' anymore. Saving as UTF-8" % self.fileencoding, master=self.text)
                    return BOM_UTF8 + chars.encode('utf-8')
            config_encoding = idleConf.GetOption('main', 'EditorWindow', 'encoding')
            if config_encoding == 'utf-8':
                return BOM_UTF8 + chars.encode('utf-8')
            ask_user = True
            try:
                chars = chars.encode(encoding)
                enc = encoding
                if config_encoding == 'locale':
                    ask_user = False
            except UnicodeError:
                chars = BOM_UTF8 + chars.encode('utf-8')
                enc = 'utf-8'
            if not ask_user:
                return chars
            # Offer to insert a coding declaration; result 1 means the
            # user chose to add it, so re-encode the updated buffer.
            dialog = EncodingMessage(self.editwin.top, enc)
            dialog.go()
            if dialog.num == 1:
                encline = '# -*- coding: %s -*-\n' % enc
                firstline = self.text.get('1.0', '2.0')
                if firstline.startswith('#!'):
                    # Keep the shebang on the first line.
                    self.text.insert('2.0', encline)
                else:
                    self.text.insert('1.0', encline)
                return self.encode(self.text.get('1.0', 'end-1c'))
            return chars

    def fixlastline(self):
        """Ensure the buffer ends with a newline."""
        c = self.text.get('end-2c')
        if c != '\n':
            self.text.insert('end-1c', '\n')

    def print_window(self, event):
        """Print the buffer via the platform print command."""
        confirm = tkMessageBox.askokcancel(title='Print', message='Print to Default Printer', default=tkMessageBox.OK, master=self.text)
        if not confirm:
            self.text.focus_set()
            return 'break'
        else:
            tempfilename = None
            saved = self.get_saved()
            if saved:
                filename = self.filename
            # Unsaved or untitled buffers are printed from a temp file.
            if not saved or filename is None:
                tfd, tempfilename = tempfile.mkstemp(prefix='IDLE_tmp_')
                filename = tempfilename
                os.close(tfd)
                if not self.writefile(tempfilename):
                    os.unlink(tempfilename)
                    return 'break'
            platform = os.name
            printPlatform = True
            if platform == 'posix':
                command = idleConf.GetOption('main', 'General', 'print-command-posix')
                command = command + ' 2>&1'
            elif platform == 'nt':
                command = idleConf.GetOption('main', 'General', 'print-command-win')
            else:
                printPlatform = False
            if printPlatform:
                command = command % filename
                pipe = os.popen(command, 'r')
                output = pipe.read().strip()
                status = pipe.close()
                if status:
                    output = 'Printing failed (exit status 0x%x)\n' % status + output
                if output:
                    output = 'Printing command: %s\n' % repr(command) + output
                    tkMessageBox.showerror('Print status', output, master=self.text)
            else:
                message = 'Printing is not enabled for this platform: %s' % platform
                tkMessageBox.showinfo('Print status', message, master=self.text)
            if tempfilename:
                os.unlink(tempfilename)
            return 'break'

    # Lazily-created Tk dialogs, shared across invocations.
    opendialog = None
    savedialog = None
    filetypes = [
        ('Python files', '*.py *.pyw', 'TEXT'),
        ('Text files', '*.txt', 'TEXT'),
        ('All files', '*')]

    def askopenfile(self):
        """Show the Open dialog; return the chosen filename or ''."""
        dir, base = self.defaultfilename('open')
        if not self.opendialog:
            self.opendialog = tkFileDialog.Open(master=self.text, filetypes=self.filetypes)
        filename = self.opendialog.show(initialdir=dir, initialfile=base)
        if isinstance(filename, unicode):
            filename = filename.encode(filesystemencoding)
        return filename

    def defaultfilename(self, mode='open'):
        """Return (directory, basename) to seed a file dialog with."""
        if self.filename:
            return os.path.split(self.filename)
        else:
            if self.dirname:
                return (self.dirname, '')
            try:
                pwd = os.getcwd()
            except os.error:
                pwd = ''
            return (pwd, '')

    def asksavefile(self):
        """Show the Save As dialog; return the chosen filename or ''."""
        dir, base = self.defaultfilename('save')
        if not self.savedialog:
            self.savedialog = tkFileDialog.SaveAs(master=self.text, filetypes=self.filetypes)
        filename = self.savedialog.show(initialdir=dir, initialfile=base)
        if isinstance(filename, unicode):
            filename = filename.encode(filesystemencoding)
        return filename

    def updaterecentfileslist(self, filename):
        """Update recent file list on all editor windows"""
        self.editwin.update_recent_files_list(filename)
def test():
root = Tk()
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind('<Control-o>', self.open)
self.text.bind('<Control-s>', self.save)
self.text.bind('<Alt-s>', self.save_as)
self.text.bind('<Alt-z>', self.save_a_copy)
return
def get_saved(self):
return 0
def set_saved(self, flag):
pass
def reset_undo(self):
pass
def open(self, event):
self.text.event_generate('<<open-window-from-file>>')
def save(self, event):
self.text.event_generate('<<save-window>>')
def save_as(self, event):
self.text.event_generate('<<save-window-as-file>>')
def save_a_copy(self, event):
self.text.event_generate('<<save-copy-of-window-as-file>>')
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = | |
import functools
import os, sys
import time
import cv2
import numpy as np
import pickle
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sklearn import mixture
from sklearn.metrics.cluster import normalized_mutual_info_score
from scipy.stats import moment
from tensorflow.linalg import logdet, trace, inv
import libs.config as cfg
import libs.nets.nets_factory as network
from libs.load_data import *
from libs.dp.merge import Gibbs_DPM_Gaussian_summary_input
from libs.dp.VI_PYMMG_functions import R_VI_PYMMG_CoC
FLAGS = tf.app.flags.FLAGS
def gmm_loss(ys, mus, gammas):
    """Clustering loss L0: mean per-sample Mahalanobis distance.

    Computes (1/N) * sum_i (y_i - mu_i) Gamma_i (y_i - mu_i)^T with a
    graph-mode tf.while_loop over the batch.

    Args:
        ys: NxD tensor of embeddings.
        mus: NxD tensor; each row is the mean of the component the
            corresponding sample is assigned to.
        gammas: NxDxD tensor; per-sample precision matrices.

    Returns:
        Scalar float32 tensor.
    """
    ll = tf.zeros([], dtype=tf.float32)
    def condition(i, ys, mus, gammas, ll):
        # Continue while i < N.
        r = tf.less(i, tf.shape(ys))
        return r[0]
    def loop(i, ys, mus, gammas, ll):
        # Accumulate the quadratic form for sample i.
        y = tf.expand_dims(ys[i], 0) #1xD
        mu = tf.expand_dims(mus[i], 0) #1xD
        gamma = gammas[i] #DxD
        ll = ll + tf.squeeze(tf.matmul(tf.matmul((y - mu), gamma),
                                       tf.transpose(y - mu)))
        return [i+1, ys, mus, gammas, ll]
    i = 0
    [i, ys, mus, gammas, ll] = tf.while_loop(condition, loop,
                                             [i, ys, mus, gammas, ll])
    return ll/tf.cast(tf.shape(ys)[0], tf.float32)
def standardize(x):
    """Center and scale a tensor column-wise.

    Args:
        x: an NxP tensor.

    Returns:
        A tensor of the same shape with per-column zero mean and unit
        variance.
    """
    mean, variance = tf.nn.moments(x, 0)  # each of shape [P]
    return (x - mean) / tf.sqrt(variance)
def np_standardize(x):
    """Column-standardize a numpy array.

    Args:
        x: an (n, p) array.

    Returns:
        Tuple ``(standardized, mean, std)`` where ``std`` is the
        population (ddof=0) standard deviation per column, equal to
        ``scipy.stats.moment(x, moment=2, axis=0) ** 0.5``.
    """
    center = np.mean(x, axis=0)
    spread = np.sqrt(np.mean((x - center) ** 2, axis=0))
    return (x - center) / spread, center, spread
def restore(sess, opt=0):
    """Restore model variables into `sess` from FLAGS.checkpoint_path.

    Args:
        sess: active tf.Session.
        opt: 1 -> restore from checkpoint;
             0 -> restore from pretrained initialization; when
                  FLAGS.normalize == 1 the batch-norm variables are
                  excluded as well.
    """
    checkpoint_path = FLAGS.checkpoint_path
    vars_to_restore = tf.trainable_variables()
    # Iterate over a copy so that removing from the live list is safe.
    vars_to_restore1 = vars_to_restore[:]
    if FLAGS.normalize == 1 and opt == 0:
        for var in vars_to_restore1:
            if 'batchnorm' in var.name:
                vars_to_restore.remove(var)
    # NOTE(review): this fully-connected-layer exclusion runs for both
    # opt values here, while the docstring implies it applies only to
    # opt == 0 — confirm the intended nesting (decompiled indentation
    # was ambiguous).
    for var in vars_to_restore1:
        if 'ip' in var.name or 'fc4' in var.name:
            vars_to_restore.remove(var)
    restorer = tf.train.Saver(vars_to_restore)
    restorer.restore(sess, checkpoint_path)
def train():
## set the parameters for different datasets
if FLAGS.dataset == 'mnist_test':
img_height = img_width = 28
learning_rate = 0.001
Detcoef = 50
apply_network = 'lenet'
elif FLAGS.dataset == 'usps':
img_height = img_width = 16
learning_rate = 0.0001
Detcoef = 50
apply_network = 'lenet0'
elif FLAGS.dataset == 'frgc':
img_height = img_width = 32
learning_rate = 0.1
Detcoef = 20
apply_network = 'lenet'
elif FLAGS.dataset == 'ytf':
img_height = img_width = 55
learning_rate = 0.1
Detcoef = 20
apply_network = 'lenet'
elif FLAGS.dataset == 'umist':
img_height = 112
img_width = 92
learning_rate = 0.0001
Detcoef = 20
apply_network = 'dlenet'
else:
img_height = FLAGS.img_height
img_width = FLAGS.img_width
learning_rate = FLAGS.learning_rate
Detcoef = FLAGS.Detcoef
apply_network = FLAGS.network
tf.logging.set_verbosity(tf.logging.DEBUG)
with tf.Graph().as_default():
# tensor for input images
if FLAGS.is_resize:
imageip = tf.placeholder(tf.float32, [None, FLAGS.resize_height, FLAGS.resize_width, 3])
else:
imageip = tf.placeholder(tf.float32, [None, img_height, img_width, 3])
# get the embedding data from the network
_, end_points =network.get_network(apply_network, imageip, FLAGS.max_k,
weight_decay=FLAGS.weight_decay, is_training=True, reuse = False, spatial_squeeze=False)
# fc3 is the name of our embedding layer
end_net = end_points['fc3']
# normalize the embedding data
if FLAGS.normalize==0: # standardize
end_data = standardize(end_net)
elif FLAGS.normalize==1: # batch normalize
end_data = slim.batch_norm(end_net, activation_fn=None, scope='batchnorm',is_training=True)
# calculate LD the sample covaraince variance matrix of embedding data
diff_data = end_data - tf.expand_dims(tf.reduce_mean(end_data, 0),0)
cov_data = 1. / (tf.cast(tf.shape(end_data)[0], tf.float32) - 1.)*tf.matmul(tf.transpose(diff_data), diff_data)
det_loss = - logdet(cov_data)
# get the numpy data for both purpose of clustering and evaluation
_, val_end_points =network.get_network(apply_network, imageip, FLAGS.max_k,
weight_decay=FLAGS.weight_decay, is_training=False, reuse = True, spatial_squeeze=False)
val_end_data = val_end_points['fc3']
if FLAGS.normalize==1:
val_end_data = slim.batch_norm(val_end_data, activation_fn=None, scope='batchnorm',is_training=False, reuse=True)
# clustering loss
cls_mus = tf.placeholder(tf.float32, [None, FLAGS.embed_dims])
cls_Gammas = tf.placeholder(tf.float32, [None, FLAGS.embed_dims, FLAGS.embed_dims])
cluster_loss = gmm_loss(end_data, cls_mus, cls_Gammas)
# l2 regularization
penalty = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# total loss
total_loss = cluster_loss + Detcoef*det_loss
if penalty:
l2_penalty = tf.add_n(penalty)
total_loss += l2_penalty
global_step = slim.create_global_step()
## load the data
df_path = '{}/{}.h5'.format(FLAGS.dataset_dir, FLAGS.dataset)
f = h5py.File(df_path, 'r')
## Get the data
data = list(f['data'])
label = list(f['labels'])
train_datum = load_train_data(data,label)
train_datum.center_data()
train_datum.shuffle(100)
val_data, val_truth = np.copy(train_datum.data), np.copy(train_datum.label)
## set up mini-batch steps and optimizer
batch_num = train_datum.data.shape[0]//FLAGS.batch_size
learning_rate = tf.train.inverse_time_decay(learning_rate, global_step, batch_num, 0.0001*batch_num, True)
var_list = tf.trainable_variables()
opt = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum = FLAGS.momentum)
train_opt = slim.learning.create_train_op(
total_loss, opt,
global_step=global_step,
variables_to_train=var_list,
summarize_gradients=False)
## load session
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.90)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
sess.run(init_op)
## log setting and results
timestampLaunch = time.strftime("%d%m%Y") + '-' + time.strftime("%H%M%S")
# record config
if not os.path.exists(FLAGS.out_dir):
os.makedirs(FLAGS.out_dir)
if not os.path.exists(os.path.join(FLAGS.out_dir, FLAGS.dataset)):
os.makedirs(os.path.join(FLAGS.out_dir, FLAGS.dataset))
outdir = os.path.join(FLAGS.out_dir, FLAGS.dataset, timestampLaunch)
if not os.path.exists(outdir):
os.makedirs(outdir)
if FLAGS.dataset == 'umist':
max_periods = 2000
else:
max_periods = FLAGS.max_periods
# load saver and restore session
saver = tf.train.Saver(max_to_keep=3)
if FLAGS.restore_previous_if_exists:
restore(sess, 1)
else:
if FLAGS.if_initialize_from_pretrain:
restore(sess, 0)
period_cluster_l, period_det_l, period_tot_l, conv_cluster_l = [], [], [], [sys.float_info.max]
""" start the training """
print('start training the dataset of {}'.format(FLAGS.dataset))
for period in range(max_periods):
real_period = period + FLAGS.checkpoint_periods
'''Forward steps'''
## get the numpy array of embedding data for clustering
val_embed = []
if FLAGS.dataset == 'mnist_test': #10000
for s in range(10):
start = s*1000
end = (s+1)*1000
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'usps': # 11000
for s in range(11):
start = s*1000
end = (s+1)*1000
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'frgc': # 2462
for s in range(25):
start = s*100
end = (s+1)*100
if s == 24:
end = end - 38
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'ytf': ##55x55; 10000
for s in range(10):
start = s*1000
end = (s+1)*1000
val_embed_x = sess.run(val_end_data, feed_dict={imageip:val_data[start:end]})
val_embed.append(val_embed_x)
elif FLAGS.dataset == 'umist': # < 2000
val_embed = sess.run(val_end_data, feed_dict={imageip:val_data})
if FLAGS.dataset != 'umist':
val_embed = np.concatenate(val_embed,axis=0)
if FLAGS.normalize==0:
val_embed, val_mean, val_std = np_standardize(val_embed)
### use dpm to cluster the embedding data
dpgmm = mixture.BayesianGaussianMixture(n_components=FLAGS.max_k,
weight_concentration_prior=FLAGS.alpha/FLAGS.max_k,
weight_concentration_prior_type='dirichlet_process',
covariance_prior=FLAGS.embed_dims*np.identity(FLAGS.embed_dims),
covariance_type='full').fit(val_embed)
val_labels = dpgmm.predict(val_embed)
if FLAGS.onsign:
### SIGN algorithm to merge clusters
ulabels = np.unique(val_labels).tolist()
uln_l = []
ulxtx_l = []
ulxx_l = []
for ul in ulabels:
ulx = val_embed[val_labels==ul,:] #Nk x p
uln = np.sum(val_labels==ul) #Nk
ulxtx = np.matmul(ulx.T, ulx) #p x p
ulxx = np.sum(ulx, axis=0) # p
uln_l.append(uln)
ulxtx_l.append(ulxtx)
ulxx_l.append(ulxx)
uxx = np.stack(ulxx_l, axis=0) #kxp
un = np.array(uln_l) # k
uxtx = np.stack(ulxtx_l, axis=0).T # p x p x k
if FLAGS.embed_dims < 50:
Rest = Gibbs_DPM_Gaussian_summary_input(uxtx, uxx, un) # mcmc
else:
Rest = R_VI_PYMMG_CoC(uxtx, uxx, un) # variational inference
member, dp_Gammas, dp_mus = Rest['member_est'], Rest['Prec'], Rest['mu']
val_labels_new = np.copy(val_labels)
for u, ul in enumerate(ulabels):
val_labels_new[val_labels==ul] = int(member[u]) # order the cluster value with index
val_labels = np.copy(val_labels_new)
# evaluate and save the results
val_count = np.bincount(val_labels)
val_count2 = np.nonzero(val_count)
est_cls = {}
for v in val_count2[0].tolist():
est_cls[v] = []
for vv, vl in enumerate(val_labels.tolist()):
est_cls[vl].append(val_truth[vv])
## sort the labels to be used for backward
train_labels_new = np.copy(val_labels)
member1 = np.array([int(m) for m in member])
member2 = np.unique(member1)
member2.sort()
train_labels_new1 = np.copy(train_labels_new)
for mbi, mb in enumerate(member2.tolist()):
train_labels_new1[train_labels_new==mb] = mbi
train_labels_onehot = np.eye(member2.shape[0])[train_labels_new1]
else:
dp_mus = dpgmm.means_
dp_Gammas = dpgmm.precisions_.T
train_labels_onehot = np.eye(FLAGS.max_k)[val_labels]
nmi = normalized_mutual_info_score(val_labels, val_truth)
if period > 0:
print("NMI for period{} is {}".format(period,nmi))
if period >= 100:
## check if the results need to be saved using det_loss and cluster_loss
dperiod_det_loss = np.abs((period_det_l[-1] - period_det_l[-2])/period_det_l[-2])
if dperiod_det_loss <= FLAGS.epsilon:
conv_cluster_l.append(period_cluster_loss)
if conv_cluster_l[-1] < min(conv_cluster_l[:-1]):
best_nmi, best_period = nmi, real_period
saver.save(sess, os.path.join(outdir, 'ckpt'), real_period)
# save truth and labels
np.savez(os.path.join(outdir,'labels_{}.npy'.format(real_period)),
val_labels=val_labels, val_truth=val_truth,
val_mean=val_mean, val_std=val_std)
# save dpm model
with open(os.path.join(outdir, 'model_{}.pkl'.format(real_period)), 'wb') as pf:
pickle.dump(dpgmm, pf)
if period < max_periods - 1:
''' Backward steps'''
# require: train_labels_onehot:NxK; dp_mus: KxD; dp_Gammas: DxDxK
train_datum.reset() # reset data from the original order to match predicted label
period_cluster_loss, period_det_loss = 0., 0.
for step in range(batch_num):
real_step = step + real_period*batch_num
train_x, train_y = train_datum.nextBatch(FLAGS.batch_size)
start, end = step*FLAGS.batch_size, (step+1)*FLAGS.batch_size
step_labels_onehot = train_labels_onehot[start:end]
cls_mu = np.matmul(step_labels_onehot, dp_mus) # NxK x KxD=> NxD
cls_Gamma = np.matmul(dp_Gammas, step_labels_onehot.T).T # DxDxK KxN => DxDxN => NxDxD
_, dlossv, dtlossv= sess.run([train_opt, cluster_loss, det_loss],
feed_dict={imageip:train_x, cls_mus:cls_mu, cls_Gammas: cls_Gamma})
# save loss
period_cluster_loss += dlossv/batch_num
period_det_loss += | |
#!/usr/bin/env python3
import re
import glob
import os
import argparse
import subprocess
import platform
import shutil
import distutils.dir_util
import json
from pathlib import Path
if any(platform.win32_ver()):
import scripts.find_visual_studio
def run(dest_file, dest_dir, project, max_num_configs, verbose):
def debug_print(s):
if verbose:
print(s)
# ===============================================================
# framework
is_windows = any(platform.win32_ver())
is_linux = not is_windows
class Config:
cpp = None
args = None
compiler = None
compiler_name = None
variant = None
compiler_type = None
def __init__(self):
self.args = []
def generate_configs():
msvc_variants = [
['Debug', ['/Od', '/Ob0', '/MDd', '/GS', '/DWIN32', '/D_WINDOWS']],
['RelWithDebInfo', ['/O2', '/Ob1', '/MD', '/GS', '/DWIN32', '/D_WINDOWS', '/DNDEBUG']],
['Release', ['/O2', '/Ob2', '/MD', '/GS', '/DWIN32', '/D_WINDOWS', '/DNDEBUG']],
]
gcc_variants = [
["Debug", ['-O0', '-g']],
["RelWithDebInfo", ['-O2', '-g', '-DNDEBUG']],
["Release", ['-O3', '-DNDEBUG']],
]
def make_cpp_arg(cpp, compiler_type):
if compiler_type == 'msvc':
return "/std:c++{}".format(cpp)
elif compiler_type == 'gcc':
return "-std=c++{}".format(cpp)
else:
assert False, "Unkown compiler type"
if is_windows:
def get_absolute_path(prog):
return subprocess.check_output(['where.exe', prog]).decode('utf-8').splitlines()[0]
for vs_version in [2019, 2017, 2015]:
vs_path = scripts.find_visual_studio.run(vs_version)
if vs_path is not None:
def execute_and_steal_environment(args):
null = open(os.devnull, 'w')
environment = subprocess.check_output(args + ['&&', 'set'], stderr=null)
for env in environment.splitlines():
k, _, v = map(str.strip, env.decode('utf-8').strip().partition('='))
if k.startswith('?'):
continue
os.environ[k] = v
is_64_bit = platform.machine().endswith('64')
toolst_arch = 'x64' if is_64_bit else 'x86'
vcvarsall_path = vs_path / Path('VC/Auxiliary/Build/vcvarsall.bat')
assert vcvarsall_path.exists(), 'could not find vcvarsall.bat'
execute_and_steal_environment([vcvarsall_path, toolst_arch, toolst_arch])
cl_path = get_absolute_path('cl.exe')
cc = ['Visual Studio {}'.format(vs_version), cl_path]
for cpp in [14, 17]:
for variant in msvc_variants:
c = Config()
c.cpp = cpp
c.args = variant[1] + [make_cpp_arg(cpp, 'msvc')]
c.variant = variant[0]
c.compiler = cc[1]
c.compiler_name = cc[0]
c.compiler_type = 'msvc'
yield c
break
for cc in [
['Clang', get_absolute_path('clang.exe'), 'gcc', gcc_variants],
['Clang-Cl', get_absolute_path('clang-cl.exe'), 'msvc', msvc_variants]
]:
if not os.path.exists(cc[1]):
continue
for cpp in [11, 14, 17]:
for variant in cc[3]:
if cc[1] is not None:
c = Config()
c.cpp = cpp
c.args = variant[1] + [make_cpp_arg(cpp, cc[2])]
c.variant = variant[0]
c.compiler = cc[1]
c.compiler_name = cc[0]
c.compiler_type = cc[2]
yield c
elif is_linux:
for cc in [
['Clang 6', '/usr/bin/clang++-6'],
['Clang 7', '/usr/bin/clang++-7'],
['Clang 8', '/usr/bin/clang++-8'],
['Clang 9', '/usr/bin/clang++-9'],
['GCC 7', '/usr/bin/g++-7'],
['GCC 8', '/usr/bin/g++-8'],
['GCC 9', '/usr/bin/g++-9'],
]:
if not os.path.exists(cc[1]):
continue
for libcpp in [False, True]:
if libcpp and not cc[0].startswith("Clang"):
continue
if libcpp:
continue # TODO: install multiple versions
extra_args = []
var_suffix = ""
if libcpp:
extra_args.append('-stdlib=libc++')
var_suffix = " (libc++)"
for cpp in [11, 14, 17]:
for variant in gcc_variants:
c = Config()
c.cpp = cpp
c.args = variant[1] + extra_args + ["-march=skylake", make_cpp_arg(cpp, 'gcc')]
c.variant = variant[0] + var_suffix
c.compiler = cc[1]
c.compiler_name = cc[0]
c.compiler_type = 'gcc'
yield c
else:
assert False, "unknown platform"
all_configs = list(generate_configs())
since_cpp14_configs = [c for c in all_configs if c.cpp >= 14]
since_cpp17_configs = [c for c in all_configs if c.cpp >= 17]
def truncate_cfgs(cfgs):
if max_num_configs and max_num_configs < len(cfgs):
cfgs = cfgs[0:max_num_configs]
return cfgs
all_configs = truncate_cfgs(all_configs)
since_cpp14_configs = truncate_cfgs(since_cpp14_configs)
since_cpp17_configs = truncate_cfgs(since_cpp17_configs)
project_list = []
project_jobs = {}
def add(category, project, project_url, url, version, name, file, configs, cwd, *, extra_args=[], include_dirs=[]):
if project not in project_jobs:
project_list.append(project)
project_jobs[project] = []
for c in configs:
job = {
"category": category,
"project": project,
"project_url": project_url,
"url": url,
"version": version,
"name": name,
"file": file,
"variant": c.variant,
"compiler_type": c.compiler_type,
"args": c.args + extra_args,
"cpp": c.cpp,
"include_dirs": include_dirs,
"compiler": c.compiler,
"compiler_name": c.compiler_name,
"working_dir": cwd
}
project_jobs[project].append(job)
debug_print("added {}".format(job))
# ===============================================================
# Projects
# ===============================================================
if not project or project == 'stl_cpp':
# ===============================================================
# c++ std
url_cpp = "https://en.cppreference.com/w/cpp/header"
for h in [
"cstdlib",
"csignal",
"csetjmp",
"cstdarg",
"typeinfo",
"typeindex",
"type_traits",
"bitset",
"functional",
"utility",
"ctime",
"chrono",
"cstddef",
"initializer_list",
"tuple",
"new",
"memory",
"scoped_allocator",
"climits",
"cfloat",
"cstdint",
"cinttypes",
"limits",
"exception",
"stdexcept",
"cassert",
"system_error",
"cerrno",
"cctype",
"cwctype",
"cstring",
"cwchar",
"cuchar",
"string",
"array",
"vector",
"deque",
"list",
"forward_list",
"set",
"map",
"unordered_set",
"unordered_map",
"stack",
"queue",
"iterator",
"algorithm",
"cmath",
"complex",
"valarray",
"random",
"numeric",
"ratio",
"cfenv",
"iosfwd",
"ios",
"istream",
"ostream",
"iostream",
"fstream",
"sstream",
# "strstream", # deprecated
"iomanip",
"streambuf",
"cstdio",
"locale",
"clocale",
"regex",
"atomic",
"thread",
"mutex",
"future",
"condition_variable",
]:
add("Standard Library", "C++ Standard Library", url_cpp,
url_cpp + "/" + h, "", "<" + h + ">", h, all_configs, dest_dir)
for h in [
"shared_mutex",
]:
add("Standard Library", "C++ Standard Library", url_cpp,
url_cpp + "/" + h, "", "<" + h + ">", h, since_cpp14_configs, dest_dir)
for h in [
"any",
"optional",
"variant",
"memory_resource",
"string_view",
"charconv",
"execution",
"filesystem",
]:
for c in since_cpp17_configs:
if h in ["memory_resource", "execution"] and "clang" in c.compiler:
continue
if c.compiler.endswith("/g++-7") and h == "filesystem":
continue
if (c.compiler.endswith("/g++-7") or c.compiler.endswith("/g++-8")) and h in ["memory_resource", "charconv", "execution"]:
continue
add("Standard Library", "C++ Standard Library", url_cpp,
url_cpp + "/" + h, "", "<" + h + ">", h, [c], dest_dir)
if not project or project == 'stl_c':
# ===============================================================
# c std
url_c = "https://en.cppreference.com/w/c/header"
for h in [
"assert.h",
"complex.h",
"ctype.h",
"errno.h",
"fenv.h",
"float.h",
"inttypes.h",
"iso646.h",
"limits.h",
"locale.h",
"math.h",
"setjmp.h",
"signal.h",
# C11: "stdalign.h",
"stdarg.h",
# C11: "stdatomic.h",
"stdbool.h",
"stddef.h",
"stdint.h",
"stdio.h",
"stdlib.h",
# C11: "stdnoreturn.h",
"string.h",
"tgmath.h",
# C11: "threads.h",
"time.h",
# C11: "uchar.h",
"wchar.h",
"wctype.h",
]:
add("Standard Library", "C Standard Library",
url_c, None, "", "<" + h + ">", h, all_configs, dest_dir)
if (not project or project == 'posix') and not is_windows:
# ===============================================================
# c POSIX
for h in [
"aio.h",
"arpa/inet.h",
"assert.h",
"complex.h",
"cpio.h",
"ctype.h",
"dirent.h",
"dlfcn.h",
"errno.h",
"fcntl.h",
"fenv.h",
"float.h",
"fmtmsg.h",
"fnmatch.h",
"ftw.h",
"glob.h",
"grp.h",
"iconv.h",
"inttypes.h",
"iso646.h",
"langinfo.h",
"libgen.h",
"limits.h",
"locale.h",
"math.h",
"monetary.h",
"mqueue.h",
# "ndbm.h", missing on ubuntu
"net/if.h",
"netdb.h",
"netinet/in.h",
"netinet/tcp.h",
"nl_types.h",
"poll.h",
"pthread.h",
"pwd.h",
"regex.h",
"sched.h",
"search.h",
"semaphore.h",
"setjmp.h",
"signal.h",
"spawn.h",
"stdarg.h",
"stdbool.h",
"stddef.h",
"stdint.h",
"stdio.h",
"stdlib.h",
"string.h",
"strings.h",
"stropts.h",
"sys/ipc.h",
"sys/mman.h",
"sys/msg.h",
"sys/resource.h",
"sys/select.h",
"sys/sem.h",
"sys/shm.h",
"sys/socket.h",
"sys/stat.h",
"sys/statvfs.h",
"sys/time.h",
"sys/times.h",
"sys/types.h",
"sys/uio.h",
"sys/un.h",
"sys/utsname.h",
"sys/wait.h",
"syslog.h",
"tar.h",
"termios.h",
"tgmath.h",
"time.h",
"unistd.h",
"utime.h",
"utmpx.h",
"wchar.h",
"wctype.h",
"wordexp.h",
]:
add("Standard Library", "C POSIX Library",
"https://en.wikipedia.org/wiki/C_POSIX_library", None, "", "<" + h + ">", h, all_configs, dest_dir)
# ===============================================================
# stdlibs
# TODO: properly get source for different stdlibs
# debug_print("getting standard libraries")
#
# def get_stdlib(url, versions, name):
# global args
# repo_dir = os.path.join(dest_dir, "stdlibs", name)
# if not os.path.exists(repo_dir):
# git_args = ["git", "clone", url, repo_dir]
# debug_print(" .. getting stdlib via " + repo_dir)
# subprocess.check_call(git_args)
#
# get_stdlib("git://gcc.gnu.org/git/gcc.git", [], "libcstd++")
# ===============================================================
# libs
debug_print("parsing libraries")
def add_project_files(cfg, cat, lib, libpath):
assert "url" in cfg, "project.json needs at least an URL"
for v in os.listdir(libpath):
vpath = libpath + "/" + v
if not os.path.isdir(vpath):
continue
debug_print(" " + vpath)
for (dirname, _, files) in os.walk(vpath):
for f in files:
fpath = dirname + "/" + f
rfpath = fpath[len(vpath)+1:]
if not os.path.isfile(fpath):
continue
if "whitelist" in cfg and not rfpath in cfg["whitelist"]:
debug_print(" " + fpath +
" (" + rfpath + ") - IGNORED")
continue
debug_print(" " + fpath + " (" + rfpath + ")")
ext = os.path.splitext(fpath)[-1]
if len(ext) < 2 or ext[1] not in ['c', 'h']:
continue
furl = None
if "file_url_pattern" in cfg:
furl = cfg["file_url_pattern"].replace(
"$version", v).replace("$file", rfpath)
if "no_url_for_files" in cfg:
if re.fullmatch(cfg["no_url_for_files"], f):
furl = None
add(cat, lib, cfg["url"], furl, v,
rfpath, rfpath, all_configs, vpath, include_dirs=[vpath])
def make_github_file_url(cfg, v, f):
return os.path.join(cfg["url"], "blob", v, cfg["working_dir"], f)
def make_gitlab_file_url(cfg, v, f):
return os.path.join(cfg["url"], "-", "blob", v, cfg["working_dir"], f)
fetched_repos = set()
def get_repo_files(url, version, base_dir, target_dir):
debug_print(" .. getting files from " + url)
urltype = None
# e.g. https://github.com/boostorg/config
if url.startswith("https://github.com"):
urltype = "github"
m = re.fullmatch(r"https://github\.com/([\w-]+)/([\w-]+)/?", url)
assert m is not None, "malformed url"
user = m.group(1)
proj = m.group(2)
# e.g. https://gitlab.com/libeigen/eigen
elif url.startswith("https://gitlab.com"):
urltype = "gitlab"
m = re.fullmatch(r"https://gitlab\.com/([\w-]+)/([\w-]+)", url)
assert m is not None, "malformed url"
user = m.group(1)
proj = m.group(2)
# e.g. https://graphics.rwth-aachen.de:9000/OpenMesh/OpenMesh
elif url.startswith("https://graphics.rwth-aachen.de:9000"):
urltype = "rwth-graphics"
m = re.fullmatch(
r"https://graphics\.rwth-aachen\.de:9000/([\w-]+)/([\w-]+)", url)
assert m is not None, "malformed url"
user = m.group(1)
proj = m.group(2)
else:
assert False, "unknown/unsupported repo"
repo_dir = os.path.join(dest_dir, "repos", urltype, user, proj)
debug_print(" .. repo in | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
class RewriteChecker:
    """Small harness for rewrite-simplify unit tests.

    Holds one tvm.arith.Analyzer instance; every verify() call on the same
    checker shares that analyzer (and therefore any bound information the
    test registered via analyzer.update).
    """

    def __init__(self):
        self.analyzer = tvm.arith.Analyzer()

    def verify(self, data, expected):
        """Run rewrite_simplify on `data` and assert structural equality
        with `expected`, reporting all three expressions on failure."""
        simplified = self.analyzer.rewrite_simplify(data)
        message = "data={}, res={}, expected={}".format(data, simplified, expected)
        assert tvm.ir_pass.Equal(simplified, expected), message
def test_vector_simplify():
    """Rewrite-simplify rules for vector expressions (Ramp / Broadcast /
    vectorized casts).

    NOTE: ck.analyzer.update calls below register a const-int bound for x
    that later verify() calls rely on — statement order matters.
    """
    ck = RewriteChecker()
    x, y, z = tvm.var("x"), tvm.var("y"), tvm.var("z")
    # Add rules
    ck.verify(tvm.expr.Ramp(x, 1, 4) + tvm.expr.Ramp(y, 2, 4),
              tvm.expr.Ramp(x + y, 3, 4))
    ck.verify(tvm.expr.Ramp(x, 1, 2) + y,
              tvm.expr.Ramp(x + y, 1, 2))
    ck.verify(y + tvm.expr.Ramp(x, 1, 2),
              tvm.expr.Ramp(y + x, 1, 2))
    ck.verify(y.astype("int32x2") + x.astype("int32x2"),
              (y + x).astype("int32x2"))
    # Sub rules
    ck.verify(tvm.expr.Ramp(x, 4, 4) - tvm.expr.Ramp(y, 2, 4),
              tvm.expr.Ramp(x - y, 2, 4))
    ck.verify(tvm.expr.Ramp(x, 1, 2) - y,
              tvm.expr.Ramp(x - y, 1, 2))
    ck.verify(y - tvm.expr.Ramp(x, 1, 2),
              tvm.expr.Ramp(y - x, -1, 2))
    ck.verify(y.astype("int32x2") - x.astype("int32x2"),
              (y - x).astype("int32x2"))
    # Mul rules
    ck.verify(y.astype("int32x2") * x.astype("int32x2"),
              (y * x).astype("int32x2"))
    ck.verify(tvm.expr.Ramp(x, 4, 4) * 2,
              tvm.expr.Ramp(x * 2, 8, 4))
    ck.verify(2 * tvm.expr.Ramp(x, 4, 4),
              tvm.expr.Ramp(x * 2, 8, 4))
    ## Div rules
    ck.verify(y.astype("int32x2") / x.astype("int32x2"),
              (y / x).astype("int32x2"))
    ck.verify(tvm.expr.Ramp(x, 4, 4) / 2,
              tvm.expr.Ramp(x/ 2, 2, 4))
    # Register 0 <= x <= 1000 so the div/mod rules on x*8 ramps can fire.
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
    ck.verify(tvm.expr.Ramp(x * 8 + 1, 1, 4) / 8,
              (x).astype("int32x4"))
    # Expected equals the input: this ramp must NOT be simplified.
    ck.verify(tvm.expr.Ramp(x * 8 + 15, 1, 4) / 8,
              tvm.expr.Ramp(x * 8 + 15, 1, 4) / 8)
    ## Mod rules
    ck.verify(y.astype("int32x2") % x.astype("int32x2"),
              (y % x).astype("int32x2"))
    ck.verify(tvm.expr.Ramp(x, 4, 4) % 2,
              tvm.expr.Broadcast(x % 2, 4))
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
    ck.verify(tvm.expr.Ramp(x * 8 + 1, 1, 4) % 8,
              tvm.expr.Ramp(1, 1, 4))
    # Stride 15 case stays unsimplified (expected equals input).
    ck.verify(tvm.expr.Ramp(x * 8 + 1, 15, 4) % 8,
              tvm.expr.Ramp(1, 15, 4) % 8)
    # Min/Max rules
    vx = tvm.var("vx", dtype="int32x2")   # already-vector operand
    vc = tvm.var("vc", dtype="uint1")     # scalar boolean operand
    ck.verify(tvm.min(y.astype("int32x2"), x.astype("int32x2")),
              tvm.min(y, x).astype("int32x2"))
    ck.verify(tvm.min(tvm.min(vx, y.astype("int32x2")), x.astype("int32x2")),
              tvm.min(vx, tvm.min(y, x).astype("int32x2")))
    ck.verify(tvm.max(y.astype("int32x2"), x.astype("int32x2")),
              tvm.max(y, x).astype("int32x2"))
    ck.verify(tvm.max(tvm.max(vx, y.astype("int32x2")), x.astype("int32x2")),
              tvm.max(vx, tvm.max(y, x).astype("int32x2")))
    ## Logical rules
    ck.verify(y.astype("int32x2").equal(x.astype("int32x2")),
              (y.equal(x)).astype("uint1x2"))
    ck.verify(tvm.expr.NE(y.astype("int32x2"), (x.astype("int32x2"))),
              (tvm.expr.NE(y, x)).astype("uint1x2"))
    # Note: > and >= are canonicalized to < and <= with swapped operands.
    ck.verify(y.astype("int32x2") > x.astype("int32x2"),
              (x < y).astype("uint1x2"))
    ck.verify(y.astype("int32x2") >= x.astype("int32x2"),
              (x <= y).astype("uint1x2"))
    ck.verify(y.astype("int32x2") < x.astype("int32x2"),
              (y < x).astype("uint1x2"))
    ck.verify(y.astype("int32x2") <= x.astype("int32x2"),
              (y <= x).astype("uint1x2"))
    ck.verify(tvm.expr.And(y.astype("int32x2") <= x.astype("int32x2"), vc.astype("uint1x2")),
              (tvm.expr.And(y <= x, vc)).astype("uint1x2"))
    ck.verify(tvm.expr.Or(y.astype("int32x2") <= x.astype("int32x2"), vc.astype("uint1x2")),
              (tvm.expr.Or(y <= x, vc)).astype("uint1x2"))
def test_select_simplify():
    """Rewrite-simplify rules for Select expressions."""
    checker = RewriteChecker()
    x = tvm.var("x")
    y = tvm.var("y")
    z = tvm.var("z")
    Select = tvm.expr.Select

    # Add/Sub rules: Selects with the same condition combine branch-wise.
    checker.verify(Select(x < 0, y, 0) + Select(x < 0, 1, z),
                   Select(x < 0, y + 1, z))
    checker.verify(Select(x < 0, y, 1) - Select(x < 0, 1, z),
                   Select(x < 0, y + (-1), 1 - z))
    checker.verify(Select(x < 0, y, z) - y,
                   Select(x < 0, 0, z - y))
    checker.verify(Select(x < 0, y, z) - z,
                   Select(x < 0, y - z, 0))
    # Min/Max distribute over Selects sharing one condition.
    checker.verify(tvm.min(Select(x < 0, y, 0), Select(x < 0, 1, z)),
                   Select(x < 0, tvm.min(y, 1), tvm.min(0, z)))
    checker.verify(tvm.max(Select(x < 0, y, 0), Select(x < 0, 1, z)),
                   Select(x < 0, tvm.max(y, 1), tvm.max(0, z)))
    # Statically decidable conditions fold the Select to one branch.
    checker.verify(Select(x * 3 + 1 != 0, y, z), y)
    checker.verify(Select(x * 3 + 1 == 0, y, z), z)
    # Identical branches make the condition irrelevant.
    checker.verify(Select(x > 0, y + 1, y + 1), y + 1)
def test_add_index_simplify():
    """Rewrite-simplify rules for scalar (index) addition.

    NOTE: the analyzer.update calls register const-int bounds for x that the
    (x/8)*8 + x%8 rules depend on — statement order matters.
    """
    ck = RewriteChecker()
    x, y, z = tvm.var("x"), tvm.var("y"), tvm.var("z")
    ck.verify(x + (y - x), y)
    ck.verify(x - (y + 1) + (y + 1), x)
    ck.verify((x - 10) + (10 - z), x - z)
    ck.verify((x - y) + (z - x), z - y)
    # min/max absorb an added term into their arguments
    ck.verify(tvm.min(x, y - z) + z, tvm.min(x + z, y))
    ck.verify(tvm.min(x - z, y) + z, tvm.min(x, y + z))
    ck.verify(tvm.max(x, y - 10) + 10, tvm.max(x + 10, y))
    ck.verify(tvm.max(x - 11, y) + 11, tvm.max(x, y + 11))
    # max(a,b) + min(a,b) == a + b
    ck.verify(tvm.max(x, y * 2) + tvm.min(x, y * 2), x + y * 2);
    ck.verify(tvm.min(x, y * 2) + tvm.max(x, y * 2), x + y * 2);
    ck.verify(tvm.max(x, y + 2) + (-2), tvm.max(x + (-2), y));
    ck.verify(tvm.min(x, y + 2) + (-2), tvm.min(x + (-2), y));
    ck.verify(tvm.min(x + 2, y + 3) + (-2), tvm.min(x, y + 1));
    ck.verify(tvm.max(0, 1 - x * 4) + x * 4, tvm.max(x * 4, 1))
    ck.verify(tvm.max(2 - x * 4, 0) + x * 4, tvm.max(x * 4, 2))
    ck.verify(tvm.min(0, 1 - x * 4) + x * 4, tvm.min(x * 4, 1))
    ck.verify(tvm.min(2 - x * 4, 0) + x * 4, tvm.min(x * 4, 2))
    # common-factor folding: x*y + x*c -> x*(y+c) regardless of operand order
    ck.verify(x * y + x * 10, x * (y + 10))
    ck.verify(y * x + x * 10, x * (y + 10))
    ck.verify(y * x + 10 * x, x * (y + 10))
    ck.verify(x * y + 10 * x, x * (y + 10))
    ck.verify(y * (x % 8) + 10 * (x % 8), (x % 8) * (y + 10))
    # (x/8)*8 + x%8 == x requires x to be known non-negative
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
    ck.verify((x / 8) * 8 + x % 8, x)
    # canonicalization
    ck.verify(x + 2 + 3 + 4 + x, x * 2 + 9);
    ck.verify(x + 2 + 3 + 4 + x * 3, x * 4 + 9);
    # conservative bound
    try:
        # With x possibly negative the rewrite is invalid, so verify() must
        # fail with AssertionError; reaching the raise below means the
        # simplifier was NOT conservative enough.
        ck.analyzer.update(x, tvm.arith.ConstIntBound(-1, 1000), override=True)
        ck.verify((x / 8) * 8 + x % 8, x)
        raise RuntimeError("bad")
    except AssertionError:
        pass
def test_sub_index_simplify():
ck = RewriteChecker()
x, y, z = tvm.var("x"), tvm.var("y"), tvm.var("z")
ck.verify(x + y - y, x)
ck.verify(x + y - x, y)
ck.verify(x - (y + x), 0 - y)
ck.verify(x - (x + y), 0 - y)
ck.verify(tvm.min(x, y) - x, tvm.min(0, y - x))
ck.verify(tvm.min(x, y) - y, tvm.min(x - y, 0))
ck.verify(tvm.max(x, y) - x, tvm.max(0, y - x))
ck.verify(tvm.max(x, y) - y, tvm.max(x - y, 0))
ck.verify(x - tvm.min(x, y), tvm.max(0, x - y))
ck.verify(y - tvm.min(x, y), tvm.max(y - x, 0))
ck.verify(x - tvm.max(x, y), tvm.min(0, x - y))
ck.verify(y - tvm.max(x, y), tvm.min(y - x, 0))
# mul co-efficient foldng
ck.verify(x - x, 0)
ck.verify(x * y - x, x * (y + (-1)))
ck.verify(x * y - 10 * x, x * (y + (-10)))
ck.verify(y * x - x * z, x * (y - z))
ck.verify(y * x - z * x, x * (y - z))
ck.verify(x + 10 - 20, x + (-10))
# 4-operands pattern
ck.verify((x + y) - (x + z), y - z)
ck.verify((y + x) - (x + z), y - z)
ck.verify((x + y) - (z + x), y - z)
ck.verify((y + x) - (z + x), y - z)
ck.verify(tvm.min(x + y, z) - x, tvm.min(y, z - x))
ck.verify(tvm.min(y + x, z) - x, tvm.min(y, z - x))
ck.verify(tvm.min(z, x + y) - x, tvm.min(z - x, y))
ck.verify(tvm.min(z, y + x) - x, tvm.min(z - x, y))
ck.verify(tvm.max(x + y, z) - x, tvm.max(y, z - x))
ck.verify(tvm.max(y + x, z) - x, tvm.max(y, z - x))
ck.verify(tvm.max(z, x + y) - x, tvm.max(z - x, y))
ck.verify(tvm.max(z, y + x) - x, tvm.max(z - x, y))
ck.verify(x - tvm.min(x + y, z), tvm.max(0 - y, x - z))
ck.verify(x - tvm.min(y + x, z), tvm.max(0 - y, x - z))
ck.verify(x - tvm.min(z, x + y), tvm.max(x - z, 0 - y))
ck.verify(x - tvm.min(z, y + x), tvm.max(x - z, 0 - y))
ck.verify(tvm.min(x, y) - tvm.min(y, x), 0)
ck.verify(tvm.max(x, y) - tvm.max(y, x), 0)
ck.verify(tvm.min(x, y) - tvm.min(x + 10, y + 10), -10)
ck.verify(tvm.min(x | |
infirmities infirmity infix inflame inflamed inflames
inflaming inflammable inflammation inflammations inflammatory
inflatable inflate inflated inflates inflating inflationary inflection
inflections inflicted inflicting inflicts influenza influx influxes
informality informant informants informational informer informers
infraction infractions infrared infrequently infringe infringed
infringements infringes infringing infuriate infuriated infuriates
infuriating infuse infused infuses infusing infusion infusions
ingeniously ingenuity ingest ingested ingesting ingests ingrain
ingrained ingraining ingrains ingratiate ingratiated ingratiates
ingratiating ingratitude inhale inhaled inhaler inhalers inhales
inhaling inheritances inhibitions inhospitable inhuman inhumane
inhumanities inhumanity initiation initiations initiatives initiator
initiators injected injecting injection injections injects injunction
injunctions injurious injustices inked inkier inkiest inking inkling
inks inky inlaid inland inlay inlaying inlays inlet inlets inmate
inmates inn innards innate inned innermost inners inning innings
innkeeper innkeepers innocenter innocentest innocently innocents
innocuous innovations inns innuendo innuendoed innuendoing innuendos
innumerable inoculate inoculated inoculates inoculating inoculation
inoculations inoffensive inoperative inopportune inordinate inquest
inquests inquisition inquisitions inquisitive ins insanely insaner
insanest insanity insatiable inscribe inscribed inscribes inscribing
inscription inscriptions inscrutable insecticide insecticides
insecurities insecurity insensitivity inseparable inseparables
insertions insider insiders insides insights insignia insignias
insignificance insincere insincerely insincerity insinuate insinuated
insinuates insinuating insinuation insinuations insipid insistent
insolence insolent insoluble insolubles insolvency insolvent
insolvents insomnia inspections inspector inspectors inspirations
instability instanced instancing instantaneous instantaneously
instants instep insteps instigate instigated instigates instigating
instigation instill instilled instilling instills instinctive
instincts instituted institutes instituting institutional instructive
instructor instructors instrumentals instrumented instrumenting
insubordinate insubordination insubstantial insufferable
insufficiently insular insulate insulated insulates insulating
insulation insulator insulators insulin insurances insure insured
insurer insurers insures insurgent insurgents insuring insurmountable
insurrection insurrections intakes intangible intangibles integrals
intellects intellectually intellectuals intelligently intelligible
intelligibly intenser intensest intensified intensifies intensify
intensifying intensities intensives intents intercede interceded
intercedes interceding intercept intercepted intercepting interception
interceptions intercepts interchange interchangeable interchanged
interchanges interchanging intercom intercoms interconnect
intercontinental interdependence interdependent interiors interject
interjected interjecting interjection interjections interjects
interlock interlocked interlocking interlocks interloper interlopers
interlude interluded interludes interluding intermarriage
intermarriages intermarried intermarries intermarry intermarrying
intermediaries intermediary intermediates interment interments
interminable interminably intermingle intermingled intermingles
intermingling intermission intermissions intermittently intern
internationally internationals interned interning interns
interplanetary interplay interpolation interpose interposed interposes
interposing interpreters interracial interred interring interrogated
interrogates interrogating interrogation interrogations interrogator
interrogators inters intersect intersected intersecting intersects
intersperse interspersed intersperses interspersing interstate
interstates interstellar intertwine intertwined intertwines
intertwining interventions interviewer interviewers interweave
interweaves interweaving interwove interwoven intestinal intestine
intestines inti intimacies intimacy intimated intimately intimates
intimating intimation intimations intimidate intimidated intimidates
intimidating intimidation intolerable intolerably intolerant
intonation intonations intoxicate intoxicated intoxicates intoxicating
intoxication intractable intramural intransitive intransitives
intravenous intravenouses intrepid intricacies intricacy intricate
intrigue intrigued intrigues intriguing introductions introspective
introvert introverts intrude intruded intruder intruders intrudes
intruding intrusion intrusions intrusive intrusives intuition
intuitions intuitively inundate inundated inundates inundating
inundation inundations invader invaders invalidated invalidates
invalidating invalided invaliding invalids invariable invariables
invariant invasions invective inventive inventoried inventories
inventors inventory inventorying inversely inverses inversion
inversions invertebrate invertebrates invested investigator
investigators investing investments investor investors invests
inveterate invigorate invigorated invigorates invigorating invincible
invisibility invisibly invitations invocation invocations invoice
invoiced invoices invoicing involuntarily involuntary involvements
invulnerable inward inwardly inwards iodine ions iota iotas irascible
irater iratest ire ired ires iridescence iridescent iring iris irises
irk irked irking irks ironed ironically ironies ironing irons
irradiate irradiated irradiates irradiating irrationally irrationals
irreconcilable irrefutable irregular irregularities irregularity
irregulars irrelevance irrelevances irreparable irreplaceable
irrepressible irreproachable irresistible irresponsibility
irretrievable irretrievably irreverence irreverent irreversible
irrevocable irrevocably irrigate irrigated irrigates irrigating
irrigation irritability irritable irritably irritant irritants
irritations islander islanders isle isles isthmus isthmuses italic
italics itch itched itches itchier itchiest itching itchy iterate
iteration iterations iterative itinerant itinerants itineraries
itinerary ivies ivories ivory ivy jab jabbed jabber jabbered jabbering
jabbers jabbing jabs jackal jackals jackass jackasses jackdaw jacked
jacking jackknife jackknifed jackknifes jackknifing jackknives jackpot
jackpots jacks jade jaded jades jading jagged jaggeder jaggedest
jaguar jaguars jailed jailer jailers jailing jails jalopies jalopy
jamb jambed jambing jamboree jamborees jambs jangle jangled jangles
jangling janitor janitors jar jarred jarring jars jaundice jaundiced
jaundices jaundicing jaunt jaunted jauntier jaunties jauntiest
jauntily jaunting jaunts jaunty javelin javelins jaw jawbone jawboned
jawbones jawboning jawed jawing jaws jay jays jaywalk jaywalked
jaywalker jaywalkers jaywalking jaywalks jazzed jazzes jazzing
jealousies jealously jealousy jeer jeered jeering jeers jell jelled
jellied jelling jells jellyfish jellyfishes jellying jeopardy jerked
jerkier jerkiest jerking jerks jerky jersey jerseys jested jester
jesters jesting jests jets jetted jetties jetting jettison jettisoned
jettisoning jettisons jetty jewel jewelries jewelry jewels jiffies
jiffy jig jigged jigging jiggle jiggled jiggles jiggling jigs jigsaw
jigsawed jigsawing jigsaws jilt jilted jilting jilts jingle jingled
jingles jingling jinx jinxed jinxes jinxing jitterier jitteriest
jitters jittery jobbed jobbing jockey jockeyed jockeying jockeys
jocular jog jogged jogger joggers jogging jogs jointed jointing joker
jokers jollied jollier jollies jolliest jollying jolt jolted jolting
jolts jostle jostled jostles jostling jot jots jotted jotting
journalism journeyed journeying journeys jovial jovially joyed joyful
joyfuller joyfullest joyfully joying joyous joyously joys joystick
jubilant jubilation jubilee jubilees judicial judicially judiciaries
judiciary judicious judiciously judo jug jugged juggernaut jugging
juggle juggled juggler jugglers juggles juggling jugs jugular jugulars
juiced juices juicier juiciest juicing juicy jumble jumbled jumbles
jumbling jumbo jumbos jumper jumpers jumpier jumpiest jumpy junctions
juncture junctures jungles juniors juniper junipers junked junket
junketed junketing junkets junkie junkier junkies junkiest junking
junks junta juntas juries jurisdiction juror jurors juster justest
justices justifications justly jut jute juts jutted jutting juveniles
juxtapose juxtaposed juxtaposes juxtaposing juxtaposition
juxtapositions kaleidoscope kaleidoscopes kangaroo kangarooed
kangarooing kangaroos karat karate karats kayak kayaked kayaking
kayaks keel keeled keeling keels keened keener keenest keening keenly
keens keepers keepsake keepsakes keg kegged kegging kegs kelp kennel
kennels kerchief kerchiefed kerchiefing kerchiefs kernels kerosene
ketchup kettles keyboarded keyboarding keyhole keyholes keynote
keynoted keynotes keynoting keystone keystones khaki khakis kickback
kickbacks kickoff kickoffs kidnapper kidnappers kidneys killers
killings kiln kilned kilning kilns kilo kilobyte kilobytes kilos
kilowatt kilowatts kilt kilts kimono kimonos kin kinda kinder
kindergarten kindergartens kindest kindle kindled kindles kindlier
kindliest kindling kindnesses kindred kinfolk kingdoms kingfisher
kingfishers kink kinked kinkier kinkiest kinking kinks kinky kins
kinship kiosk kiosks kipper kissed kisses kissing kitchened
kitchenette kitchenettes kitchening kitchens kite kited kites kiting
kitten kittens kitties kitty kiwi kiwis knack knacked knacker knacking
knacks knapsack knapsacks knead kneaded kneading kneads kneecap
kneecapped kneecapping kneecaps kneed kneeing kneel kneeling kneels
knelt knickers knifed knifes knifing knighted knighthood knighthoods
knighting knights knit knits knitted knitting knives knob knobs
knocker knockers knockout knockouts knoll knolls knot knots knotted
knottier knottiest knotting knotty knowinger knowingest knowingly
knowings knowledgeable knuckle knuckled knuckles knuckling koala
koalas kosher koshered koshering koshers kowtow kowtowed kowtowing
kowtows kudos laboratories laborious laboriously labyrinth labyrinths
lace laced lacerate lacerated lacerates lacerating laceration
lacerations laces lacier laciest lacing lacquer lacquered lacquering
lacquers lacrosse lacy laddered laddering ladders lade laded laden
lades lading ladle ladled ladles ladling lads ladybug ladybugs
ladylike laggard laggards lagged lagging lagoon lagoons lags lair
lairs laked lakes laking lamb lambda lambed lambing lambs lame lamed
lament lamentable lamentation lamentations lamented lamenting laments
lamer lames lamest laming lampoon lampooned lampooning lampoons lamps
lance lanced lances lancing lander landings landladies landlady
landlocked landlords landmark landmarks landowner landowners
landscaped landscapes landscaping landslid landslide landslides
landsliding lanes languid languish languished languishes languishing
languor languorous languors lankier lankiest lanky lantern lanterns
lap lapel lapels lapped lapping laps lapse lapsed lapses lapsing
larcenies larceny lard larded larding lards larges larked larking
larks larva larvae larynges laryngitis larynx lascivious lash lashed
lashes lashing lass lasses lastly latch latched latches latching
latent latents lateral lateraled lateraling laterals latex lath lathe
lathed lather lathered lathering lathers lathes lathing laths latitude
latitudes latrine latrines lattice lattices laud laudable lauded
lauding lauds laughable laughingstock laughingstocks launcher
launchers launder laundered laundering launders laundries laundry
laureate laureated laureates laureating laurel laurels lava lavatories
lavender lavendered lavendering lavenders lavish lavished lavisher
lavishes lavishest lavishing lawful lawless lawmaker lawmakers lawns
lawsuit lawsuits lax laxative laxatives laxer laxes laxest laxity
layered layering layman laymen layouts lazied lazier lazies laziest
lazying leaden leafed leafier leafiest leafing leafleted leafleting
leafs leafy leagued leagues leaguing leakage leakages leaked leaking
leaks leaky leaner leanest leaped leapfrog leapfrogged leapfrogging
leapfrogs leaping leaps lease leased leases leash leashed leashes
leashing leasing leathery lectern lecterns ledge ledger ledgered
ledgering ledgers ledges lee leech leeched leeches leeching leek leeks
leer leered leerier leeriest leering leers leery leeway lefter leftest
leftmost lefts legacies legacy legalistic legality legals legends
legged legging leggings legibility legibly legion legions legislate
legislated legislates legislating legislative legislator legislators
legislature legislatures legitimacy legitimated legitimates
legitimating legume legumes leisurely lemme lemonade lemoned lemoning
lemons lengthen lengthened lengthening lengthens lengthier lengthiest
lengthwise leniency lenients lentil lentils leopard leopards leotard
leotards leper lepers leprosy lesbians lesion lesions lessen lessened
lessening lessens letdown letdowns lethals lethargic lethargy lettered
letterhead letterheads lettering lettuce lettuces letup letups levee
levees lever leverage leveraged leverages leveraging levered levering
levers levied levies levity levy levying lewd lewder lewdest lexical
lexicon lexicons liabilities liaisons liar liars libels liberalism
liberally liberals liberate liberated liberates liberating liberation
libertarian librarians libretto lice lichen lichens lick licked
licking licks licorice licorices lids lieu lieutenant lieutenants
lifeboat lifeboats | |
undefined:
# if reverse: arr.reverse()
#elif reverse:
# arr = arr.slice(stop, start+1)
# arr.reverse()
#else:
# #if stop < 0: ## mozilla spec says negative indices are supported
# # stop = arr.length + stop
# arr = arr.slice(start, stop)
#return arr
## Array.prototype extensions: emulate Python list / set methods on JS arrays.
## NOTE(review): these defs appear to run inside _setup_array_prototype() (its
## header is above this view); `this` is the array bound by the PythonJS
## decorator protocol -- confirm against the transpiler runtime.
@Array.prototype.__setslice__
def func(start, stop, step, items):
## slice assignment `arr[start:stop] = items` via a single splice() call
if start is undefined: start = 0
if stop is undefined: stop = this.length
arr = [start, stop-start]
for item in items: arr.push( item )
this.splice.apply(this, arr )
@Array.prototype.append
def func(item):
## list.append; returns `this` so calls can be chained
this.push( item )
return this
@Array.prototype.extend
def extend(other):
## list.extend; returns `this` so calls can be chained
for obj in other:
this.push(obj)
return this
@Array.prototype.remove
def func(item):
## list.remove; NOTE(review): if item is absent indexOf gives -1 and
## splice(-1, 1) removes the LAST element -- confirm this is intended
index = this.indexOf( item )
this.splice(index, 1)
@Array.prototype.insert
def insert(index, obj):
## list.insert with Python-style negative index support
if index < 0: index = this.length + index
this.splice(index, 0, obj)
@Array.prototype.index
def index(obj):
## list.index, by identity/strict equality (JS indexOf)
return this.indexOf(obj)
@Array.prototype.count
def count(obj):
## list.count, by identity
a = 0
for item in this:
if item is obj: ## note that `==` will not work here, `===` is required for objects
a += 1
return a
## set-like features ##
@Array.prototype.bisect
def func(x, low, high):
## binary search: insertion point for x keeping the array sorted
if low is undefined: low = 0
if high is undefined: high = this.length
while low < high:
a = low+high
mid = Math.floor(a/2)
if x < this[mid]:
high = mid
else:
low = mid + 1
return low
## `-` operator
@Array.prototype.difference
def func(other):
## elements of this not present in other
f = lambda i: other.indexOf(i)==-1
return this.filter( f )
## `&` operator
@Array.prototype.intersection
def func(other):
## elements present in both arrays
f = lambda i: other.indexOf(i)!=-1
return this.filter( f )
## `<=` operator
@Array.prototype.issubset
def func(other):
## True when every element of this is also in other
for item in this:
if other.indexOf(item) == -1:
return False
return True
## non-standard utils ##
@Array.prototype.copy
def func():
## shallow copy of the array
arr = []
i = 0
while i < this.length:
arr.push( this[i] )
i += 1
return arr
## install the prototype extensions at module load
_setup_array_prototype()
def _setup_nodelist_prototype():
## Give browser NodeList objects a Python-list-like interface
## (contains / len / indexing / iteration). Browser-only: see guard below.
with javascript:
@NodeList.prototype.__contains__
def func(a):
if this.indexOf(a) == -1: return False
else: return True
@NodeList.prototype.__len__
def func():
return this.length
@NodeList.prototype.get
def func(index):
## raw indexed access; used by Iterator via obj.get
return this[ index ]
@NodeList.prototype.__getitem__
def __getitem__(index):
## Python-style negative index support
if index < 0: index = this.length + index
return this[index]
@NodeList.prototype.__setitem__
def __setitem__(index, value):
if index < 0: index = this.length + index
this[ index ] = value
@NodeList.prototype.__iter__
def func():
with python:
return Iterator(this, 0)
@NodeList.prototype.index
def index(obj):
return this.indexOf(obj)
## NodeList only exists in the browser DOM, so skip under NodeJS / web workers
if __NODEJS__ == False and __WEBWORKER__ == False:
_setup_nodelist_prototype()
def bisect(a, x, low=None, high=None):
## bisect function from bisect module of the stdlib;
## delegates to the Array.prototype.bisect extension installed above
with javascript:
return a.bisect(x, low, high)
def range(num, stop, step):
"""Emulates Python's range function"""
## single-arg call: range(n) -> stop is undefined, start at 0;
## two/three-arg call: range(start, stop[, step])
if stop is not undefined:
i = num
num = stop
else:
i = 0
if step is undefined:
step = 1
with javascript:
arr = []
while i < num:
arr.push(i)
i += step
return arr
def xrange(num, stop, step):
## alias: PythonJS range is already eager, so xrange == range
return range(num, stop, step)
def sum( arr ):
    """Shim for Python's built-in sum: total of all items in *arr*.

    Returns 0 for an empty iterable, matching the builtin's default start.
    """
    total = 0
    for item in arr:
        total += item
    return total
class StopIteration: ## DEPRECATED
## kept only for backwards compatibility with old generated code
pass
def len(ob):
## shim for Python's built-in len; dispatches on the JS runtime type
with javascript:
if instanceof(ob, Array):
return ob.length
elif __is_typed_array(ob):
return ob.length
elif instanceof(ob, ArrayBuffer):
return ob.byteLength
elif ob.__len__:
## any object exposing a __len__ method (truthiness test, not hasattr)
return ob.__len__()
else: #elif instanceof(ob, Object):
## plain JS object: number of own keys
return Object.keys(ob).length
def next(obj):
## shim for built-in next; PythonJS iterators expose a .next() method
return obj.next()
def map(func, objs):
## eager map shim: always returns a JS array, not a lazy iterator
with javascript: arr = []
for ob in objs:
v = func(ob)
with javascript:
arr.push( v )
return arr
def filter(func, objs):
## eager filter shim: always returns a JS array
with javascript: arr = []
for ob in objs:
if func( ob ):
with javascript:
arr.push( ob )
return arr
def min( lst ):
    """Shim for Python's built-in min over a single iterable.

    Returns the smallest value, or None when *lst* is empty (instead of
    raising like the CPython builtin).
    """
    smallest = None
    for item in lst:
        if smallest is None or item < smallest:
            smallest = item
    return smallest
def max( lst ):
    """Shim for Python's built-in max over a single iterable.

    Returns the largest value, or None when *lst* is empty (instead of
    raising like the CPython builtin).
    """
    largest = None
    for item in lst:
        if largest is None or item > largest:
            largest = item
    return largest
def abs( num ):
## shim for built-in abs, backed by the JS Math object
return JS('Math.abs(num)')
def ord( char ):
## code point of the first character (UTF-16 code unit, like JS charCodeAt)
return JS('char.charCodeAt(0)')
def chr( num ):
## character for a code point; inverse of ord for the BMP range
return JS('String.fromCharCode(num)')
with javascript:
class __ArrayIterator:
## minimal index-based iterator over a raw JS array
def __init__(self, arr, index):
self.arr = arr
self.index = index
self.length = arr.length
def next(self):
index = self.index
self.index += 1
arr = self.arr
return JS('arr[index]')
class Iterator:
## generic iterator over any object exposing get(index);
## rather than throwing an exception, it could be more optimized to have
## the iterator set a done flag; another downside is that the try/catch
## around this makes errors inside the loop go silent.
def __init__(self, obj, index):
self.obj = obj
self.index = index
self.length = len(obj)
self.obj_get = obj.get ## cache this for speed
def next(self):
with javascript:
index = self.index
self.index += 1
## call convention: positional-args array + empty kwargs object
return self.obj_get( [index], {} )
def tuple(a):
## shim for built-in tuple; currently identical to list() below because
## PythonJS represents both as JS arrays.
## TODO tuple needs a solution for dict keys
with javascript:
if Object.keys(arguments).length == 0: #arguments.length == 0:
return []
elif instanceof(a, Array):
return a.slice()
elif typeof(a) == 'string':
return a.split('')
else:
print a
print arguments
raise TypeError
def list(a):
## shim for built-in list: copy an array, split a string, or [] for no args
with javascript:
if Object.keys(arguments).length == 0: #arguments.length == 0:
return []
elif instanceof(a, Array):
return a.slice()
elif typeof(a) == 'string':
return a.split('')
else:
print a
print arguments
raise TypeError
with javascript:
def __tuple_key__(arr):
## Build a stable string key for using an array (tuple) as a dict key.
## Strings are quoted, nested arrays recurse, objects use their __uid__
## (objects without a __uid__ cannot be hashed and raise KeyError),
## everything else (numbers) is used as-is; parts are joined with ','.
r = []
i = 0
while i < arr.length:
item = arr[i]
t = typeof(item)
if t=='string':
r.append( "'"+item+"'")
elif instanceof(item, Array):
r.append( __tuple_key__(item) )
elif t=='object':
if item.__uid__ is undefined:
raise KeyError(item)
r.append( item.__uid__ )
else:
r.append( item )
i += 1
return r.join(',')
class dict:
## Python dict emulation backed by a plain JS object stored in self[...].
## Object/function keys are keyed by a generated __uid__ string; array
## (tuple) keys go through __tuple_key__.
# http://stackoverflow.com/questions/10892322/javascript-hashtable-use-object-key
# using a function as a key is allowed, but would waste memory because it gets converted to a string
# http://stackoverflow.com/questions/10858632/are-functions-valid-keys-for-javascript-object-properties
def __init__(self, js_object=None, pointer=None):
## pointer: adopt an existing JS object as the backing store (no copy);
## js_object: initialize from an array of pairs / {key,value} records,
## or copy another dict.
with javascript:
self[...] = {}
if pointer is not None:
self[...] = pointer
elif js_object:
ob = js_object
if instanceof(ob, Array):
for o in ob:
with lowlevel:
if instanceof(o, Array):
k= o[0]; v= o[1]
else:
k= o['key']; v= o['value']
try:
self.__setitem__( k,v )
except KeyError:
raise KeyError('error in dict init, bad key')
elif isinstance(ob, dict):
for key in ob.keys():
value = ob[ key ]
self.__setitem__( key, value )
else:
print 'ERROR init dict from:', js_object
raise TypeError
def jsify(self):
## recursively convert contained values to plain JS and return the
## low-level backing object
#keys = Object.keys( self[...] ) ## TODO check how this got broken, this should always be a low-level object?
keys = __object_keys__( self[...] )
for key in keys:
value = self[...][key]
if typeof(value) == 'object':
if hasattr(value, 'jsify'):
self[...][key] = value.jsify()
elif typeof(value) == 'function':
raise RuntimeError("can not jsify function")
return self[...]
def copy(self):
## shallow copy
return dict( self )
def clear(self):
## drop all entries by replacing the backing object
with javascript:
self[...] = {}
def has_key(self, key):
## NOTE(review): for object keys without a __uid__ this tests
## `undefined in __dict` rather than failing -- confirm intended
__dict = self[...]
if JS("typeof(key) === 'object' || typeof(key) === 'function'"):
# Test undefined because it can be in the dict
key = key.__uid__
if JS("key in __dict"):
return True
else:
return False
def update(self, other):
for key in other:
self.__setitem__( key, other[key] )
def items(self):
## list of [key, value] pairs (eager, like Python 2)
arr = []
for key in self.keys():
arr.append( [key, self[key]] )
return arr
def get(self, key, _default=None):
try:
return self[key]
except:
return _default
def set(self, key, value):
## non-standard convenience alias for __setitem__
self.__setitem__(key, value)
def __len__(self):
__dict = self[...]
return JS('Object.keys(__dict).length')
def __getitem__(self, key):
'''
note: `"4"` and `4` are the same key in javascript, is there a sane way to workaround this,
that can remain compatible with external javascript?
'''
with javascript:
__dict = self[...]
err = False
if instanceof(key, Array):
#key = JSON.stringify( key ) ## fails on objects with circular references ##
key = __tuple_key__(key)
elif JS("typeof(key) === 'object' || typeof(key) === 'function'"):
# Test undefined because it can be in the dict
if JS("key.__uid__ && key.__uid__ in __dict"):
return JS('__dict[key.__uid__]')
else:
err = True
if __dict and JS("key in __dict"):
return JS('__dict[key]')
else:
err = True
if err:
## NOTE(review): msg is built but never used -- the KeyError is
## raised with the full key list instead of the missing key.
msg = "missing key: %s -\n" %key
raise KeyError(__dict.keys())
def __setitem__(self, key, value):
with javascript:
if key is undefined:
raise KeyError('undefined is invalid key type')
if key is null:
raise KeyError('null is invalid key type')
__dict = self[...]
if instanceof(key, Array):
#key = JSON.stringify( key ) ## fails on objects with circular references ##
key = __tuple_key__(key)
if key is undefined:
raise KeyError('undefined is invalid key type (tuple)')
inline( '__dict[key] = value')
elif JS("typeof(key) === 'object' || typeof(key) === 'function'"):
## lazily assign a unique id so the object can be used as a key
if JS("key.__uid__ === undefined"):
# "" is needed so that integers can also be used as keys #
JS(u"key.__uid__ = '' + _PythonJS_UID++")
JS('__dict[key.__uid__] = value')
else:
JS('__dict[key] = value')
def keys(self):
with lowlevel:
return Object.keys( self[...] )
def pop(self, key, d=None):
## NOTE(review): a stored value of None is indistinguishable from a
## missing key here, so pop() returns the default without deleting it.
v = self.get(key, None)
if v is None:
return d
else:
js_object = self[...]
JS("delete js_object[key]")
return v
def values(self):
with javascript:
keys = Object.keys( self[...] )
out = []
for key in keys:
out.push( self[...][key] )
return out
def __contains__(self, value):
## membership by attempted lookup; any lookup error means "not present"
try:
self[value]
return True
except:
return False
def __iter__(self):
## iterate over keys, like Python
return Iterator(self.keys(), 0)
def set(a):
'''
This returns an array that is a minimal implementation of set.
Often sets are used simply to remove duplicate entries from a list,
and then get converted back to a list; it is safe to use fastset for this.
The array prototype is overloaded with basic set functions:
difference
intersection
issubset
Note: sets in Python are not subscriptable, but can be iterated over.
Python docs say that sets are unordered; some programs may rely on this disorder
for randomness. For sets of integers we emulate the unorder only upon initialization
of the set, by masking the value by bits-1. Python implements sets starting with an
array of length 8, and mask of 7; if set length grows to 6 (3/4th), then it allocates
a new array of length 32 and mask of 31. This is only emulated for arrays of
integers up to an array length of 1536.
'''
with javascript:
hashtable = null
if a.length <= 1536:
hashtable = {}
keys = []
## pick the CPython-like mask for the input length
if a.length < 6: ## hash array length 8
mask = 7
elif a.length < 22: ## 32
mask = 31
elif a.length < 86: ## 128
mask = 127
elif a.length < 342: ## 512
mask = 511
else: ## 2048
mask = 2047
fallback = False
if hashtable:
## fast path: only used when every element is an integer
for b in a:
if typeof(b)=='number' and b is (b|0): ## set if integer
key = b & mask
## NOTE(review): keys is pushed unconditionally and masked keys can
## collide, so duplicates/collisions are not deduplicated on this
## path (e.g. repeated integers survive) -- confirm intended.
hashtable[ key ] = b
keys.push( key )
else:
fallback = True
break
else:
fallback = True
s = []
if fallback:
## generic path: O(n^2) dedup via indexOf, preserves first-seen order
for item in a:
if s.indexOf(item) == -1:
s.push( item )
else:
## emit integers ordered by masked key to emulate hash disorder
keys.sort()
for key in keys:
s.push( hashtable[key] )
return s
def frozenset(a):
## frozen-ness is not enforced; same minimal array-based set
return set(a)
class array:
## note that class-level dicts can only be used after the dict class has been defined above,
## however, we can still not rely on using a dict here because dict creation relies on get_attribute,
## and get_attribute relies on __NODEJS__ global variable to be set to False when inside NodeJS,
## to be safe this is changed to use JSObjects
with javascript:
typecodes = {
'c': 1, # char
'b': 1, # signed char
'B': 1, # unsigned char
'u': 2, # unicode
'h': 2, # signed short
'H': 2, # unsigned short
'i': | |
import pytest
from tests.utils import assert_bindings
def test_g_day_min_inclusive005_1268_g_day_min_inclusive005_1268_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=- - -01
    and facet=maxExclusive and value=- - -30) and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_minInclusive005.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_minInclusive005.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_min_inclusive004_1267_g_day_min_inclusive004_1267_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=- - -01
    and facet=maxInclusive and value=- - -30) and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_minInclusive004.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_minInclusive004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_min_inclusive003_1266_g_day_min_inclusive003_1266_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=- - -01
    and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_minInclusive003.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_minInclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_min_inclusive001_1264_g_day_min_inclusive001_1264_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=- - -01
    and document value=- - -01
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_minInclusive001.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_minInclusive001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_max_exclusive003_1263_g_day_max_exclusive003_1263_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxExclusive and value=- - -30
    and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_maxExclusive003.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_maxExclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_max_inclusive003_1260_g_day_max_inclusive003_1260_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=- - -30
    and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_maxInclusive003.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_maxInclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_max_inclusive001_1258_g_day_max_inclusive001_1258_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=- - -01
    and document value=- - -01
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_maxInclusive001.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_maxInclusive001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_enumeration004_1257_g_day_enumeration004_1257_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=enumeration and value=- - -15 -
    - -01 - - -30 and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_enumeration004.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_enumeration004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_enumeration002_1255_g_day_enumeration002_1255_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=enumeration and value=- - -15
    and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_enumeration002.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_enumeration002.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_day_pattern001_1253_g_day_pattern001_1253_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=pattern and value=- - -[0-9]{2}
    and document value=- - -15
    """
    params = {
        "schema": "msData/datatypes/Facets/gDay/gDay_pattern001.xsd",
        "instance": "msData/datatypes/Facets/gDay/gDay_pattern001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)
def test_g_month_day_min_exclusive005_1252_g_month_day_min_exclusive005_1252_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minExclusive and value=-
    -01-01 and facet=maxExclusive and value=- -10-01) and document value=-
    -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minExclusive005.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minExclusive005.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_min_exclusive004_1251_g_month_day_min_exclusive004_1251_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minExclusive and value=-
    -01-01 and facet=maxInclusive and value=- -10-01) and document value=-
    -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minExclusive004.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minExclusive004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_min_exclusive003_1250_g_month_day_min_exclusive003_1250_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minExclusive and value=- -01-01
    and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minExclusive003.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minExclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_min_inclusive005_1247_g_month_day_min_inclusive005_1247_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=-
    -01-01 and facet=maxExclusive and value=- -10-01) and document value=-
    -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive005.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive005.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_min_inclusive004_1246_g_month_day_min_inclusive004_1246_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=-
    -01-01 and facet=maxInclusive and value=- -10-01) and document value=-
    -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive004.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_min_inclusive003_1245_g_month_day_min_inclusive003_1245_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=- -01-01
    and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive003.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_min_inclusive001_1243_g_month_day_min_inclusive001_1243_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=- -01-01
    and document value=- -01-01
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive001.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_minInclusive001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_max_exclusive003_1242_g_month_day_max_exclusive003_1242_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxExclusive and value=- -10-01
    and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_maxExclusive003.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_maxExclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_max_inclusive003_1239_g_month_day_max_inclusive003_1239_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=- -10-01
    and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_maxInclusive003.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_maxInclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_max_inclusive001_1237_g_month_day_max_inclusive001_1237_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=- -01-01
    and document value=- -01-01
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_maxInclusive001.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_maxInclusive001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_enumeration004_1236_g_month_day_enumeration004_1236_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=enumeration and value=- -03-15
    - -01-01 - -10-01 and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_enumeration004.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_enumeration004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_enumeration002_1234_g_month_day_enumeration002_1234_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=enumeration and value=- -03-15
    and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_enumeration002.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_enumeration002.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_month_day_pattern001_1232_g_month_day_pattern001_1232_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=pattern and value=-
    -[0-9]{2}-[0-9]{2} and document value=- -03-15
    """
    params = {
        "schema": "msData/datatypes/Facets/gMonthDay/gMonthDay_pattern001.xsd",
        "instance": "msData/datatypes/Facets/gMonthDay/gMonthDay_pattern001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)
def test_g_year_min_exclusive005_1231_g_year_min_exclusive005_1231_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minExclusive and value=1998
    and facet=maxExclusive and value=2002) and document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minExclusive005.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minExclusive005.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_min_exclusive004_1230_g_year_min_exclusive004_1230_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minExclusive and value=1998
    and facet=maxInclusive and value=2002) and document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minExclusive004.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minExclusive004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_min_exclusive003_1229_g_year_min_exclusive003_1229_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minExclusive and value=1998 and
    document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minExclusive003.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minExclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_min_inclusive005_1226_g_year_min_inclusive005_1226_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=1998
    and facet=maxExclusive and value=2002) and document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minInclusive005.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minInclusive005.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_min_inclusive004_1225_g_year_min_inclusive004_1225_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=1998
    and facet=maxInclusive and value=2002) and document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minInclusive004.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minInclusive004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_min_inclusive003_1224_g_year_min_inclusive003_1224_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=1998 and
    document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minInclusive003.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minInclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_min_inclusive001_1222_g_year_min_inclusive001_1222_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=1998 and
    document value=1998
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_minInclusive001.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_minInclusive001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_max_exclusive003_1221_g_year_max_exclusive003_1221_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxExclusive and value=2002 and
    document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_maxExclusive003.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_maxExclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_max_inclusive003_1218_g_year_max_inclusive003_1218_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=2002 and
    document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_maxInclusive003.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_maxInclusive003.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_max_inclusive001_1216_g_year_max_inclusive001_1216_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=1998 and
    document value=1998
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_maxInclusive001.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_maxInclusive001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_enumeration004_1215_g_year_enumeration004_1215_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=enumeration and value=2000 1999
    2038 and document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_enumeration004.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_enumeration004.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_enumeration002_1213_g_year_enumeration002_1213_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=enumeration and value=2000 and
    document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_enumeration002.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_enumeration002.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)


def test_g_year_pattern001_1211_g_year_pattern001_1211_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=pattern and value=[0-9]{4} and
    document value=2000
    """
    params = {
        "schema": "msData/datatypes/Facets/gYear/gYear_pattern001.xsd",
        "instance": "msData/datatypes/Facets/gYear/gYear_pattern001.xml",
        "class_name": "Test",
        "version": "1.1",
        "mode": mode,
        "save_output": save_output,
        "output_format": output_format,
        "structure_style": "filenames",
    }
    assert_bindings(**params)
def test_g_year_month_min_exclusive005_1210_g_year_month_min_exclusive005_1210_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : (facet=minExclusive and value=2000-12
and facet=maxExclusive and value=2001-12) and document value=2001-03
"""
assert_bindings(
schema="msData/datatypes/Facets/gYearMonth/gYearMonth_minExclusive005.xsd",
instance="msData/datatypes/Facets/gYearMonth/gYearMonth_minExclusive005.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_g_year_month_min_exclusive004_1209_g_year_month_min_exclusive004_1209_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : (facet=minExclusive and value=2000-12
and facet=maxInclusive and value=2001-12) and document value=2001-03
"""
assert_bindings(
schema="msData/datatypes/Facets/gYearMonth/gYearMonth_minExclusive004.xsd",
instance="msData/datatypes/Facets/gYearMonth/gYearMonth_minExclusive004.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_g_year_month_min_exclusive003_1208_g_year_month_min_exclusive003_1208_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : facet=minExclusive and value=2000-12
and document value=2001-03
"""
assert_bindings(
schema="msData/datatypes/Facets/gYearMonth/gYearMonth_minExclusive003.xsd",
instance="msData/datatypes/Facets/gYearMonth/gYearMonth_minExclusive003.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_g_year_month_min_inclusive005_1205_g_year_month_min_inclusive005_1205_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : (facet=minInclusive and value=2000-12
and facet=maxExclusive and value=2001-12) and document value=2001-03
"""
assert_bindings(
schema="msData/datatypes/Facets/gYearMonth/gYearMonth_minInclusive005.xsd",
instance="msData/datatypes/Facets/gYearMonth/gYearMonth_minInclusive005.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_g_year_month_min_inclusive004_1204_g_year_month_min_inclusive004_1204_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : (facet=minInclusive and value=2000-12
    and facet=maxInclusive and value=2001-12) and document value=2001-03
    """
    # Fixed arguments for this generated conformance case.
    fixed = dict(
        schema="msData/datatypes/Facets/gYearMonth/gYearMonth_minInclusive004.xsd",
        instance="msData/datatypes/Facets/gYearMonth/gYearMonth_minInclusive004.xml",
        class_name="Test",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output, output_format=output_format, **fixed)
def test_g_year_month_min_inclusive003_1203_g_year_month_min_inclusive003_1203_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=minInclusive and value=2000-12
    and document value=2001-03
    """
    # Fixed arguments for this generated conformance case.
    fixed = dict(
        schema="msData/datatypes/Facets/gYearMonth/gYearMonth_minInclusive003.xsd",
        instance="msData/datatypes/Facets/gYearMonth/gYearMonth_minInclusive003.xml",
        class_name="Test",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output, output_format=output_format, **fixed)
| |
<reponame>teambotmax/t1
#RENGGA===Version SB
import linepy
from linepy import *
from akad.ttypes import *
from datetime import datetime
import pytz, pafy, time, asyncio, random, multiprocessing, timeit, sys, json, ctypes, codecs, tweepy, threading, glob, re, ast, six, os, subprocess, wikipedia, atexit, urllib, urllib.parse, urllib3, string, tempfile, shutil, unicodedata
from humanfriendly import format_timespan, format_size, format_number, format_length
import html5lib
import requests,json,urllib3
from random import randint
from bs4 import BeautifulSoup
#from gtts import gTTS
from googletrans import Translator
import youtube_dl
from time import sleep
from zalgo_text import zalgo
from threading import Thread,Event
import requests,uvloop
import wikipedia as wiki
requests.packages.urllib3.disable_warnings()
loop = uvloop.new_event_loop()
#==============================================================================#
botStart = time.time()
msg_dict = {}
msg_dict1 = {}
#==============[ token 1 ]==============#
cl = LINE("EGkQIpyN5nBFedtZKsT6.LvNJQB+/nDOHX+OKJy9FbG.DQGS6rc5r3Z0ilcT3IEU8s57w+rtmR0PZ9MuDKLcCCQ=")
cl.log("Auth Token : " + str(cl.authToken))
cl.log("Timeline Token : " + str(cl.tl.channelAccessToken))
#ki = LINE("ECdm6SBDFcSQbB92wzX5.1PdWISfJEQ4F83O0j89lDq.JodVBppuZDrGubzT5z2f0aDSMKYdjL8JVkK68+IV8Dk=")
#ki.log("Auth Token : " + str(ki.authToken))
#ki.log("Timeline Token : " + str(ki.tl.channelAccessToken))
#kk = LINE("ECAwXp5sPIBzbr4OUbLc.hWg0o07213VLVCn7xM25Ra.baCad+HAHXkLT+fvWaAiMb1XpJhX1WslmRPBrm3r1d8=")
#kk.log("Auth Token : " + str(kk.authToken))
#kk.log("Timeline Token : " + str(kk.tl.channelAccessToken))
#kc = LINE("ECsvzXirmdEkBI6I05q2.8MR2Nwc8pKJ7uYeNTWsieG.1SCO8Kgzzd6xIpvc/5Kf5VbszTV4IIQv244nW1uH0Ac=")
#kc.log("Auth Token : " + str(kc.authToken))
#ko = LINE("ECT0USVbXIyy6LG2LSo9.ldVvgada5Tbn2m08z3h2kq.+<KEY>=")
#ko.log("Auth Token : " + str(ko.authToken))
#jk = LINE("ECW6O4s19hZIZrkUAji8.<KEY>
#jk.log("Auth Token : " + str(jk.authToken))
#sw = LINE("ECQAqe2syyJWB4P8Omk7.gRXSPo8nFcRN2ReIOJpnPW.QLsJW4YQfEUD3P1L0EBxDtOIW7LvfpflPnLgRvgh3rc=")
#sw.log("Auth Token : " + str(sw.authToken))
#==============[●●●●●●]==============#
print ("\n\
======== \n\
====== \n\
==== \n")
print (" ======== THANKS TO ALLOH SWT --------")
print (" ======== THANKS TO PY3 ========")
print (" ======== KEEP STAY AND RULLES ========")
print ("\n\
======================= ======= =========== \n\
======================= ==== ============= \n\
======== == ====== ===== \n\
======== ====== ====== \n\
======== ====== ====== \n\
======== ====== ====== \n\
======== ======================== \n\
======== ========================== \n\
======== ====== ======\n\
============ ========= =========\n")
print ("=========== LOGIN SUCSES ==========")
oepoll = OEPoll(cl)
mid = cl.profile.mid
mid = cl.getProfile().mid
#Amid = ki.getProfile().mid
#Bmid = kk.getProfile().mid
#Cmid = kc.getProfile().mid
#Dmid = ko.getProfile().mid
#Emid = jk.getProfile().mid
#Zmid = sw.getProfile().mid
#===========================================================================================
KAC = [cl]
ABC = [cl]
Bots = [mid]
creator = ["u9559e<PASSWORD>da2<PASSWORD>d8f5f06"]
owner = ["u9559<PASSWORD>"]
admin = ["u9<PASSWORD>"]
staff = ["u<PASSWORD>"]
Saints = admin + staff
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
welcome = []
#responsename1 = ki.getProfile().displayName
#responsename2 = kk.getProfile().displayName
#responsename3 = kc.getProfile().displayName
#responsename4 = ko.getProfile().displayName
#responsename5 = jk.getProfile().displayName
settings = {
"Picture":False,
"group":{},
"groupPicture":False,
"changePicture":False,
"comment":"╔═════════════════════\n║ 🐯TEAM SOAK KILLER🐯\n║💣OPEN ORDER💣\n╠═════════════════════\n╠➩SELFTBOT ONLY\n╠➩SELFTBOT + ASIST\n╠➩1 AKUN UTAMA 75K\n╠➩1AKUN UTAMA + 2 ASIST 150K\n╠➩1AKUN UTAMA + 3 ASIST 175K\n╠➩1AKUN UTAMA + 4 ASIST 200K\n╠➩1AKUN UTAMA + 5 ASIST 225K\n╠➩1 AKUN UTAMA + 9 BOT +1 SIRI LUAR 300K\n╠➩ ProtectBot 3-20 Bot Assist\n╠═════════════════════\n║ [👇 Minat Silahan hub👇]\n╠➩line://ti/p/~rozikeane\n╠➩line://ti/p/~Rozikeane\n╠➩line://ti/p/~rozikeane\n╠➩TERIMAKASIH\n║═════════════════════\n╠➩C̽̅̈́̅ŕ̛̋͛e͛̃̾̍ǻ̕͘t͒̉̈̎o͊̓̌͠r̐̇̾̐ B̝̪̭͓͍̺͐͂̑̅̓͗͗̈́͢y͚͔̝͖̮̤͚̅̉̑̐̓̀̋̊͂͜͜͢͞:̶̨̻̪͓̦̻̋̾̂̽̎͘͜͜Roy keane>TͫͪͤͯE͊ͣ̓̕A͛̍̉̉Mͯ͌̍̽ N̓̔E̐ͮͤ̉҉̢̦Wͨͬͨͦ҉̢̨͔̥ ҉̯̖̬̀Ge̶̮̳͕͍̺̼̱͎͛̃̾̍̑̐N̓̔e̶̮̳͕͍̺̼̱͎͛̃̾̍̑̐r̵̺̲̗̩͙̐̇̾̐͐̏̃̓́̊ǻ͕͍̻̭̟̪͒́́̎̕͘̕͟TͫͪͤͯiO̸ͬ̂̀N̓̔҉̣͉S̷͌̋̅༻>\n║ ═════════════════════\n╠➩ Support By: M.Rozikin\n╚══════════════════════",
"autoJoinTicket":False,
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
]
}
wait = {
"limit": 1,
"owner":{},
"admin":{},
"addadmin":False,
"delladmin":False,
"staff":{},
"addstaff":False,
"dellstaff":False,
"bots":{},
"addbots":False,
"dellbots":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":True,
"contact":False,
'autoJoin':True,
'autoAdd':True,
'left':False,
'autoLeave':False,
'autoLeave1':False,
"detectMention":False,
"Mentionkick":False,
"welcomeOn":False,
"sticker":False,
"selfbot":True,
"likeOn":True,
'autoBlock':False,
"unsend":True,
"arespon":True,
"mention":"Tuh Yang ngintip mending Gabung Chat sini -_-",
"Respontag":"YANG NGETAG2 GAK JELAS TENGGELAMKAN -_-",
"welcome":"Selamat datang & semoga betah n bahagia",
"message":"──────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅──────\nᴼᴾᴱᴺ ᴼᴿᴰᴱᴿ\n────────┅┅───────\n➣ꜱᴇʟꜰʙᴏᴛ ᴏɴʟʏ\n➣ꜱᴇʟꜰʙᴏᴛ + ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 2 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 3 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 4 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 5 ᴀꜱɪꜱᴛ\n➣ʙᴏᴛᴘʀᴏᴛᴇᴄᴛ 3-30 ʙᴏᴛ ᴀꜱɪꜱᴛ\n➣ɴᴇᴡ ꜱᴄʀɪᴘᴛ\n➣ʜʀɢᴀ ʙɪꜱᴀ ɴᴇɢᴏ\n─────────┅┅─────────\n ✯❇͜͡❇͜͡C͜͡r͜͡e͜͡a͜͡t͜͡o͜͡r✯͜͡$͜͡ë͜͡I͜͡F͜͡-͜͡฿͜͜͡͡o͜͡t͜͡ ͜͡❇͜͡❇✯\nline.me/ti/p/~keyla_77\n➣ѕєʟғвот κɪcκєʀ_+_ᴘʀᴏᴛᴇᴄᴛ\n────────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅────────",
}
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
# Load persisted creator/owner id lists (overrides the hard-coded lists above).
with open('creator.json', 'r') as fp:
    creator = json.load(fp)
with open('owner.json', 'r') as fp:
    owner = json.load(fp)
# NOTE(review): this handle is never closed -- consider a with-block.
Setbot = codecs.open("setting.json","r","utf-8")
Setmain = json.load(Setbot)
mulai = time.time()  # bot start timestamp, used for uptime reports
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
def restart_program():
    """Re-exec the current script in place, replacing this process image."""
    os.execl(sys.executable, sys.executable, *sys.argv)
def restartBot():
    """Restart the bot by replacing the running process with a fresh interpreter."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def waktu(secs):
    """Render a duration given in seconds as 'DD Hari HH Jam MM Menit SS Detik'."""
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, minutes, seconds)
def runtime(secs):
    """Format elapsed seconds as days/hours/minutes/seconds with Indonesian labels."""
    m, s = divmod(secs, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    parts = (d, h, m, s)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % parts
def mentionMembers(to, mid):
    """Send one message to chat `to` that @-mentions every member id in `mid`.

    LINE mention entities are (S, E) character offsets into the final message
    text, so the offset bookkeeping below must stay in sync with every append.
    """
    try:
        arrData = ""
        textx = "Total Mention User「{}」\n\n [ Mention ]\n1. ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # "@x" is a placeholder; LINE replaces the S..E span with the name.
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num=(num+1)
            else:
                # NOTE(review): this footer is assigned to `no` but never
                # appended to textx -- looks unused; confirm intent.
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
    """Mention each "sider" (read-only lurker) in chat `to`.

    Mirrors mentionMembers(): LINE mention entities are (S, E) character
    offsets into the final text, so the offsets must be computed from the
    exact string appended for each member.

    Bug fix: the original computed S/E for the 3-char placeholder "@x\\n" but
    then appended settings["mention"] (an arbitrary-length configured string),
    which desynchronised every mention entity after the first. The offsets
    are now derived from the text that is actually appended.
    """
    try:
        textx = " ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # The configured sider message is what gets appended per member,
            # so the mention span must cover exactly that text.
            mention = settings["mention"]
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arr.append({'S': slen, 'E': elen, 'M': i})
            textx += mention
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num = num + 1
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        # Deliberate best-effort: keep the original friendly fallback message.
        cl.sendMessage(to, "Hay kk")
def welcomeMembers(to, mid):
    """Build a welcome text that mentions each new member id in `mid`.

    NOTE(review): the actual send (client.sendMessage) is commented out in the
    original source, so this currently only builds the text; kept as-is.

    Bug fixes:
    - the except handler called cl.sendMessage(to) without the required text
      argument, which raised TypeError inside the handler itself;
    - a per-member cl.getGroup(to) call whose result was never used has been
      removed (one wasted API round-trip per member).
    """
    try:
        textx = " ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # Placeholder whose S..E span LINE replaces with the member name.
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arr.append({'S': slen, 'E': elen, 'M': i})
            textx += "welcome"
            if no < len(mid):
                no += 1
                textx += "%i " % (num)
                num = num + 1
        # client.sendMessage(to, textx)
    except Exception as error:
        # Log instead of calling cl.sendMessage with a missing argument.
        logError(error)
def executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey):
    """Handle the 'ex'/'exc' chat commands, which exec() raw message text.

    SECURITY: exec() on chat-supplied text is remote code execution by design;
    the only gate is `sender in clientMid`. Never widen that check.
    """
    if cmd.startswith('ex\n'):
        if sender in clientMid:
            try:
                # Strip the command line, keep the code that follows it.
                sep = text.split('\n')
                ryn = text.replace(sep[0] + '\n','')
                f = open('exec.txt', 'w')
                # Redirect stdout so print() output from the exec'd code is
                # captured and echoed back to the chat afterwards.
                sys.stdout = f
                print(' ')
                exec(ryn)
                print('\n%s' % str(datetime.now()))
                f.close()
                sys.stdout = sys.__stdout__
                with open('exec.txt','r') as r:
                    txt = r.read()
                cl.sendMessage(to, txt)
            except Exception as e:
                # NOTE(review): stdout is NOT restored when exec raises, so a
                # failing command leaves prints going to exec.txt.
                pass
        else:
            cl.sendMessage(to, 'Apalo !')
    elif cmd.startswith('exc\n'):
        if sender in clientMid:
            sep = text.split('\n')
            ryn = text.replace(sep[0] + '\n','')
            if 'print' in ryn:
                # Rewrite print() so output goes to the chat instead of stdout.
                ryn = ryn.replace('print(','cl.sendExecMessage(to,')
                exec(ryn)
            else:
                exec(ryn)
        else:
            cl.sendMessage(to, 'Apalo !')
def logError(text):
    """Log an error to the client console and append a timestamped line to errorLog.txt.

    Bug fixes:
    - month lookup compared the zero-padded '%m' string ('01'..'12') against
      str(k) for k in 0..11, which never matched for January-September, so
      numeric months leaked into the log; index the month list directly.
    - the local variable `time` shadowed the imported `time` module; renamed.
    """
    cl.log("[ WIRO_212] {}".format(str(text)))
    tz = pytz.timezone("Asia/Makassar")
    day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
    bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli",
             "Agustus", "September", "Oktober", "November", "Desember"]
    inihari = datetime.now(tz=tz)
    hr = inihari.strftime('%A')
    # Translate the English weekday name to Indonesian (fall back to English
    # if the platform locale ever yields an unexpected name).
    hasil = hari[day.index(hr)] if hr in day else hr
    bln = bulan[inihari.month - 1]
    stamp = "{}, {} - {} - {} | {}".format(str(hasil), str(inihari.strftime('%d')), str(bln),
                                           str(inihari.strftime('%Y')), str(inihari.strftime('%H:%M:%S')))
    with open("errorLog.txt","a") as error:
        error.write("\n[{}] {}".format(str(stamp), text))
def sendTemplates(to, data):
    """POST a share-template payload to the LINE template API; return the response."""
    endpoint = "https://api.line.me/message/v3/share"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; Redmi Note 5 Build/OPM1.171019.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/67.0.3396.87 Mobile Safari/537.36 Line/8.1.1',
        'Content-Type': 'application/json',
        'Authorization': '<KEY>',
    }
    sendPost = requests.post(endpoint, data=json.dumps(data), headers=headers)
    print(sendPost)
    return sendPost
def sendTextTemplate(to, text):
    """Post `text` to chat `to` as a LINE flex-message bubble with a black body."""
    # Build the payload inside-out; key order matches the original literal.
    text_component = {
        "text": text,
        "size": "md",
        "margin": "none",
        "color": "#F0F8FF",
        "wrap": True,
        "weight": "regular",
        "type": "text",
    }
    baseline_box = {
        "contents": [text_component],
        "type": "box",
        "layout": "baseline",
    }
    vertical_box = {
        "contents": [baseline_box],
        "type": "box",
        "layout": "vertical",
    }
    body = {
        "contents": [vertical_box],
        "type": "box",
        "spacing": "md",
        "layout": "vertical",
    }
    data = {
        "type": "flex",
        "altText": "wiro_212 bot",
        "contents": {
            "styles": {"body": {"backgroundColor": "#000000"}},
            "type": "bubble",
            "body": body,
        },
    }
    cl.postTemplate(to, data)
def sendTextTemplate8(to, text):
data = {
"type": "flex",
"altText": "{} menghapus anda dari | |
# PYTRIS™ Copyright (c) 2017 <NAME> All Rights Reserved.
import pygame
import operator
from tkinter import *
from mino import *
from random import *
from pygame.locals import *
import ctypes
# Define
block_size = 35 # Height, width of single block
width = 10 # Board width
height = 20 # Board height
framerate = 30 # Bigger -> Slower
movement_keys = {'left': 0, 'right': 0}  # held-key state for auto-repeat movement
movement_keys_speed = 0.05
movement_keys_timer = movement_keys_speed * 2
pygame.init() # initialize pygame modules
clock = pygame.time.Clock() # frame-time tracker
screen = pygame.display.set_mode((1200, 730))
def toggle_fullscreen(screen):
    """Toggle between windowed and fullscreen 1200x730 display modes.

    Returns the new display surface; callers must rebind their `screen`.

    Bug fix: the original tested `event.key == K_LEFT`, but no `event` is in
    scope inside this function (NameError whenever it was called). The
    function-attribute flag `toggle_fullscreen.full` already tracks the
    current state, so the branch now switches on it.
    """
    if not toggle_fullscreen.full:
        # Windowed -> fullscreen with hardware acceleration and double buffering.
        screen = pygame.display.set_mode((1200, 730), FULLSCREEN | HWSURFACE | DOUBLEBUF)
    else:
        screen = pygame.display.set_mode((1200, 730))
    toggle_fullscreen.full = not toggle_fullscreen.full
    return screen
toggle_fullscreen.full = False  # start windowed
#screen = pygame.display.set_mode((1200, 730), FULLSCREEN | HWSURFACE | DOUBLEBUF) # 1200x730 window, HW acceleration, double-buffer mode
#screen = pygame.display.set_mode((1200, 730))
pygame.time.set_timer(pygame.USEREVENT, framerate * 10) # fire USEREVENT every framerate*10 ms (gravity tick)
pygame.display.set_caption("OPENMIND TETRIS™")
volume = 1.0  # master sound-effect volume (0.0 - 1.0)
class ui_variables:
    """Shared UI assets: fonts, sounds, combo graphics and the colour palette.

    Everything is loaded once at class-definition time and accessed as class
    attributes; no instances are created.
    """
    # Fonts
    font_path = "./assets/fonts/Maplestory_Light.ttf"
    font_path_b = "./assets/fonts/OpenSans-Bold.ttf"
    font_path_i = "./assets/fonts/Inconsolata/Inconsolata.otf"
    h1 = pygame.font.Font(font_path, 53) ##
    h2 = pygame.font.Font(font_path, 42)
    h4 = pygame.font.Font(font_path, 32)
    h5 = pygame.font.Font(font_path, 20) # press space
    h6 = pygame.font.Font(font_path, 10) # copyright
    h1_b = pygame.font.Font(font_path_b, 50)
    h2_b = pygame.font.Font(font_path_b, 40)
    h2_i = pygame.font.Font(font_path_i, 35)
    h5_i = pygame.font.Font(font_path_i, 13)
    # Sounds
    pygame.mixer.music.load("assets/sounds/SFX_BattleMusic.wav")
    pygame.mixer.music.set_volume(0.1)
    intro_sound = pygame.mixer.Sound("assets/sounds/SFX_Intro.wav")
    intro_sound.set_volume(volume)
    fall_sound = pygame.mixer.Sound("assets/sounds/SFX_Fall.wav")
    fall_sound.set_volume(volume)
    break_sound = pygame.mixer.Sound("assets/sounds/SFX_Break.wav")
    break_sound.set_volume(volume)
    click_sound = pygame.mixer.Sound("assets/sounds/SFX_ButtonUp.wav")
    move_sound = pygame.mixer.Sound("assets/sounds/SFX_PieceMoveLR.wav")
    move_sound.set_volume(volume)
    drop_sound = pygame.mixer.Sound("assets/sounds/SFX_PieceHardDrop.wav")
    drop_sound.set_volume(volume)
    single_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearSingle.wav")
    single_sound.set_volume(volume)
    double_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearDouble.wav")
    double_sound.set_volume(volume)
    triple_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialLineClearTriple.wav")
    triple_sound.set_volume(volume)
    tetris_sound = pygame.mixer.Sound("assets/sounds/SFX_SpecialTetris.wav")
    tetris_sound.set_volume(volume)
    GameOver_sound = pygame.mixer.Sound("assets/sounds/SFX_GameOver.wav")
    GameOver_sound.set_volume(volume)
    LevelUp_sound = pygame.mixer.Sound("assets/sounds/SFX_LevelUp.wav")
    LevelUp_sound.set_volume(volume)
    combos = [] # combo graphics
    large_combos = [] # resized combo graphics
    combo_ring = pygame.image.load("assets/Combo/4combo ring.png") # graphic for a 4-line clear
    combo_4ring = pygame.transform.scale(combo_ring, (200, 100))
    for i in range (1,11):
        combos.append(pygame.image.load("assets/Combo/"+str(i)+"combo.png"))
        large_combos.append(pygame.transform.scale(combos[i-1], (150, 200))) # resize the image
    combos_sound = []
    for i in range(1, 10) :
        combos_sound.append(pygame.mixer.Sound("assets/sounds/SFX_"+str(i+2)+"Combo.wav"))
    # Background colors
    black = (10, 10, 10) #rgb(10, 10, 10)
    #yellow = (10, 10, 10)
    white = (255, 255, 240) #rgb(255, 255, 255) right sidebar (ivory)
    grey_1 = (70, 130, 180) #rgb(26, 26, 26) blue (238,130,238)(70, 130, 180)
    #blue = (30,30,30)
    grey_2 = (221, 221, 221) #rgb(35, 35, 35) in-game board background (grey) (221, 221, 221) (135,206,235)
    grey_3 = (000,000,139) #rgb(55, 55, 55) colour of the block's drop shadow (ghost)
    # Tetrimino colors
    cyan = (69, 206, 204) #rgb(69, 206, 204) # I
    blue = (64, 111, 249) #rgb(64, 111, 249) # J
    orange = (253, 189, 53) #rgb(253, 189, 53) # L
    yellow = (246, 227, 90) #rgb(246, 227, 90) # O
    green = (98, 190, 68) #rgb(98, 190, 68) # S
    pink = (242, 64, 235) #rgb(242, 64, 235) # T
    red = (225, 13, 27) #rgb(225, 13, 27) # Z
    # Index 0 = empty cell, 1-7 = tetrimino colours, 8 = ghost colour.
    t_color = [grey_2, cyan, blue, orange, yellow, green, pink, red, grey_3]
def checkCombo(combo_count, sent):
    """Return `sent` plus the attack lines awarded for the current combo.

    Combos 1-8 award (combo_count + 1) / 2 lines; longer combos cap at 4.
    A combo count of zero (or less) awards nothing.
    """
    if combo_count <= 0:
        return sent
    bonus = (combo_count + 1) / 2 if combo_count <= 8 else 4
    return sent + bonus
def draw_image(window, img_path, x, y, width, height):
    """Load an image file, scale it to (width, height), and blit it at (x, y)."""
    surface = pygame.image.load(img_path)
    window.blit(pygame.transform.scale(surface, (width, height)), (x, y))
class button():
    """A clickable rectangular UI button with an optional image and label.

    Bug fix: the original __init__ assigned both `img` and `img_clicked` to
    self.image (the second assignment clobbered the first), so the normal
    image path was lost. They are now stored separately; draw() keeps using
    self.image, which now correctly refers to `img`.
    """
    def __init__(self, color, x, y, width, height, text='', img='', img_clicked=''):
        self.color = color
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.text = text
        self.image = img
        self.image_clicked = img_clicked

    def draw(self, win, outline=None):
        """Draw the button on the screen (image when `outline` is truthy, then rect and label)."""
        if outline:
            # pygame.draw.rect(win, outline, (self.x-2,self.y-2,self.width+4,self.height+4),0)
            # NOTE(review): original draws onto the module-level `screen`,
            # not `win` -- preserved; confirm whether `win` was intended.
            draw_image(screen, self.image, self.x, self.y, self.width, self.height)
        pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height), 0)
        if self.text != '':
            font = pygame.font.SysFont('comicsans', 60)
            label = font.render(self.text, 1, (0, 0, 0))
            win.blit(label, (self.x + (self.width / 2 - label.get_width() / 2),
                             self.y + (self.height / 2 - label.get_height() / 2)))

    def isOver(self, pos):
        """Return True when the (x, y) point `pos` lies strictly inside the button."""
        if pos[0] > self.x and pos[0] < self.x + self.width:
            if pos[1] > self.y and pos[1] < self.y + self.height:
                return True
        return False
# Button class
# - constructs a button
# - path of the button image
# - path of the image shown while the button is selected
# - button id value
# - button
# Draw block
def draw_block(x, y, color):
    """Draw one board cell: a filled square plus a 1px grid outline."""
    cell = Rect(x, y, block_size, block_size)
    pygame.draw.rect(screen, color, cell)
    # 1px border in the background blue so adjacent blocks stay distinguishable.
    pygame.draw.rect(screen, ui_variables.grey_1, cell, 1)
def draw_multiboard(next_1P,hold_1P,next_2P,hold_2P,score,level,goal):
    """Redraw the full two-player screen: background, then each player's board.

    NOTE(review): both boards receive the same score/level/goal values --
    confirm whether per-player stats were intended.
    """
    screen.fill(ui_variables.grey_1)
    draw_board(next_1P,hold_1P,score,level,goal)
    draw_2Pboard(next_2P,hold_2P,score,level,goal)
# Draw game screen
def draw_board(next, hold, score, level, goal):
    """Render player 1's sidebar (hold/next/score/level/combo) and play field.

    `next`/`hold` are 1-based mino ids; reads globals hold_mino, combo_count,
    matrix, tetrimino. (`next` shadows the builtin -- kept for interface
    compatibility.)

    Bug fix: removed a leftover merge-conflict block (commented
    '<<<<<<< HEAD' / '=======' markers) that also blitted the sidebar labels
    at x=215, inside the play-field area.
    """
    # Draw sidebar
    pygame.draw.rect(
        screen,
        ui_variables.white,
        Rect(384, 0, 180, 730)
    )
    # Draw next mino: blit only the occupied cells of its 4x4 grid
    grid_n = tetrimino.mino_map[next - 1][0]
    for i in range(4):
        for j in range(4):
            dx = 415 + block_size * j
            dy = 220 + block_size * i
            if grid_n[i][j] != 0:
                draw_block(dx, dy, ui_variables.t_color[grid_n[i][j]])
    # Draw hold mino (only when something is actually held)
    grid_h = tetrimino.mino_map[hold - 1][0]
    if hold_mino != -1:
        for i in range(4):
            for j in range(4):
                dx = 415 + block_size * j
                dy = 60 + block_size * i
                if grid_h[i][j] != 0:
                    draw_block(dx, dy, ui_variables.t_color[grid_h[i][j]])
    # Cap the displayed score
    if score > 999999:
        score = 999999
    # Render texts
    text_hold = ui_variables.h5.render("HOLD", 1, ui_variables.black)
    text_next = ui_variables.h5.render("NEXT", 1, ui_variables.black)
    text_combo = ui_variables.h5.render("COMBO", 1, ui_variables.black)
    text_score = ui_variables.h5.render("SCORE", 1, ui_variables.black)
    combo_value = ui_variables.h4.render(str(combo_count), 1, ui_variables.black)
    score_value = ui_variables.h4.render(str(score), 1, ui_variables.black)
    text_level = ui_variables.h5.render("LEVEL", 1, ui_variables.black)
    level_value = ui_variables.h4.render(str(level), 1, ui_variables.black)
    text_goal = ui_variables.h5.render("GOAL", 1, ui_variables.black)
    goal_value = ui_variables.h4.render(str(goal), 1, ui_variables.black)
    # Place texts on the sidebar
    screen.blit(text_hold, (415, 20))
    screen.blit(text_next, (415, 170))
    screen.blit(text_score, (415, 340))
    screen.blit(score_value, (420, 370))
    screen.blit(text_level, (415, 470))
    screen.blit(level_value, (420, 500))
    #screen.blit(text_goal, (415, 600))
    #screen.blit(goal_value, (420, 630))
    screen.blit(text_combo, (415, 600))
    screen.blit(combo_value, (420, 630))
    # Draw board cells from the global matrix (row 0 is hidden spawn space)
    for x in range(width):
        for y in range(height):
            dx = 17 + block_size * x
            dy = 17 + block_size * y
            draw_block(dx, dy, ui_variables.t_color[matrix[x][y + 1]])
def draw_2Pboard(next, hold, score, level, goal):
    """Render player 2's sidebar and play field (mirror of draw_board, offset right).

    Reads globals hold_mino, combo_count, matrix_2P, tetrimino.

    Bug fix: removed a leftover merge-conflict block (commented
    '<<<<<<< HEAD' / '=======' markers) that also blitted the sidebar labels
    at x=779, inside player 2's play-field area.
    """
    # Draw sidebar
    pygame.draw.rect(
        screen,
        ui_variables.white,
        Rect(948, 0, 180, 730)
    )
    # Draw next mino: blit only the occupied cells of its 4x4 grid
    grid_n = tetrimino.mino_map[next - 1][0]
    for i in range(4):
        for j in range(4):
            dx = 979 + block_size * j
            dy = 220 + block_size * i
            if grid_n[i][j] != 0:
                draw_block(dx, dy, ui_variables.t_color[grid_n[i][j]])
    # Draw hold mino (only when something is actually held)
    grid_h = tetrimino.mino_map[hold - 1][0]
    if hold_mino != -1:
        for i in range(4):
            for j in range(4):
                dx = 979 + block_size * j
                dy = 60 + block_size * i
                if grid_h[i][j] != 0:
                    draw_block(dx, dy, ui_variables.t_color[grid_h[i][j]])
    # Cap the displayed score
    if score > 999999:
        score = 999999
    # Render texts
    text_hold = ui_variables.h5.render("HOLD", 1, ui_variables.black)
    text_next = ui_variables.h5.render("NEXT", 1, ui_variables.black)
    text_combo = ui_variables.h5.render("COMBO", 1, ui_variables.black)
    text_score = ui_variables.h5.render("SCORE", 1, ui_variables.black)
    combo_value = ui_variables.h4.render(str(combo_count), 1, ui_variables.black)
    score_value = ui_variables.h4.render(str(score), 1, ui_variables.black)
    text_level = ui_variables.h5.render("LEVEL", 1, ui_variables.black)
    level_value = ui_variables.h4.render(str(level), 1, ui_variables.black)
    text_goal = ui_variables.h5.render("GOAL", 1, ui_variables.black)
    goal_value = ui_variables.h4.render(str(goal), 1, ui_variables.black)
    # Place texts on the sidebar
    screen.blit(text_hold, (979, 20))
    screen.blit(text_next, (979, 170))
    screen.blit(text_score, (979, 340))
    screen.blit(score_value, (984, 370))
    screen.blit(text_level, (979, 470))
    screen.blit(level_value, (984, 500))
    screen.blit(text_combo, (979, 600))
    screen.blit(combo_value, (984, 630))
    # Draw board cells from the global matrix_2P (row 0 is hidden spawn space)
    for x in range(width):
        for y in range(height):
            dx = 581 + block_size * x
            dy = 17 + block_size * y
            draw_block(dx, dy, ui_variables.t_color[matrix_2P[x][y + 1]])
# Draw a tetrimino
def draw_mino(x, y, mino, r):
    """Write the falling tetrimino (and its drop ghost) into the global `matrix`.

    Drops a copy of the piece straight down to find its landing row, marks
    those cells with colour index 8 (the ghost colour), then writes the real
    mino at (x, y). The real mino is written second so it wins where the two
    overlap.
    """
    grid = tetrimino.mino_map[mino - 1][r]
    tx, ty = x, y
    while not is_bottom(tx, ty, mino, r):
        ty += 1
    # Draw ghost at the landing position
    for i in range(4):
        for j in range(4):
            if grid[i][j] != 0:
                matrix[tx + j][ty + i] = 8
    # Draw mino at its current position
    for i in range(4):
        for j in range(4):
            if grid[i][j] != 0:
                matrix[x + j][y + i] = grid[i][j]
def draw_mino_2P(x, y, mino, r):
grid = tetrimino.mino_map[mino - 1][r]
tx, ty = x, y
while not is_bottom_2P(tx, ty, mino, r):
ty += 1
# Draw ghost
for i in range(4):
for j in range(4):
if grid[i][j] != | |
#!/usr/bin/env python
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# for testing
import argparse
import datetime
import numpy as np
import itertools
from core.bc import BC
from core.ddpg import DDPG
from tensorboardX import SummaryWriter
from experiments.config import *
from core.replay_memory import BaseMemory as ReplayMemory
from core import networks
from core.utils import *
import IPython
import matplotlib.pyplot as plt
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import cv2
import torch.nn as nn
import threading
import argparse
import pprint
import time, os, sys
import os.path as osp
import numpy as np
import copy
from core.env_planner import EnvPlanner
from OMG.omg.config import cfg as planner_cfg
# try: # ros
import tf
import tf2_ros
import rosnode
import message_filters
import _init_paths
import rospy
import tf.transformations as tra
import std_msgs.msg
from sensor_msgs.msg import Image, CameraInfo
from sensor_msgs.msg import PointCloud2, PointField
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Pose, PoseArray, Point, Quaternion
from sensor_msgs import point_cloud2
from cv_bridge import CvBridge, CvBridgeError
lock = threading.Lock()
# for real robot
from lula_franka.franka import Franka
from joint_listener import JointListener
from moveit import MoveitBridge
# use posecnn layer for backprojection
import posecnn_cuda
# graspnet
import tensorflow
sys.path.insert(0, '6dof-graspnet')
# set policy mode: exactly one of the grasping policies below should be active.
GA_DDPG_ONLY = True       # use only the GA-DDPG policy
GRASPNET_ONLY = False     # use only GraspNet-sampled grasps
COMBINED = False          # NOTE(review): presumably GA-DDPG + GraspNet hybrid -- confirm
RANDOM_TARGET = False     # pick the target object at random
USE_LOOK_AT = False       # move to a computed look-at pose before grasping
CONTACT_GRASPNET = False  # gates the contact_graspnet imports below
PUT_BIN = False           # drop grasped objects into the bin (see bin_conf_* joints)
# contact graspnet
from grasp_estimator import GraspEstimator, get_graspnet_config, joint_config
if CONTACT_GRASPNET:
sys.path.insert(0, 'contact_graspnet')
sys.path.insert(0, 'contact_graspnet/contact_graspnet')
from inference_edit import get_graspnet_config as get_graspnet_config_contact
from contact_grasp_estimator import GraspEstimator as GraspEstimatorContact
import config_utils
# compute look at pose according to object pose
def compute_look_at_pose(pose_listener, center_object, angle, distance, psi=0):
    """Compute a 4x4 end-effector pose that looks at `center_object`.

    angle and psi are in degrees (divided by 57.3 ~= 180/pi below);
    `distance` is the standoff radius. Returns a 4x4 float32 matrix.
    """
    # find the hand camera to hand transformation
    try:
        tf_pose = pose_listener.lookupTransform('measured/camera_color_optical_frame', 'measured/right_gripper', rospy.Time(0))
        pose_camera = make_pose(tf_pose)
    except (tf2_ros.LookupException,
            tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException):
        pose_camera = None
    if pose_camera is not None:
        # Keep only the negated translation so the CAMERA (not the gripper
        # frame) ends up at the desired viewpoint when applied below.
        pose_camera[:3, :3] = np.eye(3)
        pose_camera[:3, 3] *= -1
    else:
        print('cannot find camera to hand transformation')
    psi /= 57.3   # degrees -> radians (57.3 ~ 180/pi)
    theta = angle / 57.3
    r = distance
    # Viewpoint on a sphere of radius r around the object.
    position_robot = center_object + np.array([-r * np.cos(theta) * np.cos(psi),
                                               -r * np.cos(theta) * np.sin(psi),
                                               r * np.sin(theta)], dtype=np.float32)
    # Build a frame whose Z axis points from the viewpoint toward the object.
    Z_BG = center_object - position_robot
    Z_BG /= np.linalg.norm(Z_BG)
    Y_BG = np.array([-np.sin(psi), np.cos(psi), 0], dtype=np.float32)
    X_BG = np.cross(Y_BG, Z_BG)
    # NOTE(review): Y_BG is not re-orthogonalized against Z_BG, so R_BG may be
    # slightly non-orthonormal for theta != 0 -- confirm this is acceptable.
    R_BG = np.zeros((3, 3), dtype=np.float32)
    R_BG[:, 0] = X_BG
    R_BG[:, 1] = Y_BG
    R_BG[:, 2] = Z_BG
    pose_robot = np.eye(4, dtype=np.float32)
    pose_robot[:3, 3] = position_robot
    pose_robot[:3, :3] = R_BG[:3, :3]
    # adjust for camera offset
    if pose_camera is not None:
        pose_robot = np.dot(pose_camera, pose_robot)
    return pose_robot
class ImageListener:
    def __init__(self, agent, graspnet, graspnet_contact):
        """Wire up the physical robot, grasp policies, camera topics and publishers.

        agent: GA-DDPG policy; graspnet / graspnet_contact: grasp samplers.
        Blocks until camera info is received and TF lookups succeed.
        """
        # Physical Franka arm plus MoveIt bridge; start from a known retracted pose.
        franka = Franka(is_physical_robot=True)
        self.moveit = MoveitBridge(franka)
        self.moveit.retract()
        # self.moveit.close_gripper()
        self.moveit.open_gripper()
        self.joint_listener = JointListener()
        self.pose_listener = tf.TransformListener()
        print('sleep a short time')
        rospy.sleep(2.0)  # let TF buffers fill before the first lookup
        print('current robot joints')
        print(self.joint_listener.joint_position)
        # Static hand -> gripper-tip transform, applied to sampled grasps.
        tf_pose = self.pose_listener.lookupTransform('measured/panda_hand', 'measured/right_gripper', rospy.Time(0))
        self.grasp_offset = make_pose(tf_pose)
        print('grasp offset', self.grasp_offset)
        self.agent = agent
        self.graspnet = graspnet
        self.graspnet_contact = graspnet_contact
        self.cv_bridge = CvBridge()
        # Latest synchronized sensor data (filled by callback_rgbdm).
        self.im = None
        self.depth = None
        self.rgb_frame_id = None
        self.rgb_frame_stamp = None
        self.im_ef_pose = None
        # Accumulated point cloud, 4 x N: rows are x, y, z, label
        # (label 0 = target, 1 = obstacle; see publish_point_cloud).
        self.acc_points = np.zeros([4, 0])
        self.depth_threshold = 1.2  # ignore depth readings beyond this (meters)
        self.table_height = 0.0
        # NOTE(review): `initial_joints` is a module-level global -- confirm it
        # is defined before this class is constructed.
        self.initial_joints = initial_joints
        self.num_initial_joints = initial_joints.shape[0]
        self.index_joints = 0
        self.target_obj_id = 1 # target object ID
        # publish object points for visualization
        self.empty_msg = PointCloud2()
        self.object_points2_target_pub = rospy.Publisher('/gaddpg_object_points2_target', PointCloud2, queue_size=10)
        self.object_points2_obstacle_pub = rospy.Publisher('/gaddpg_object_points2_obstacle', PointCloud2, queue_size=10)
        # initialize a node
        self.label_sub = message_filters.Subscriber('seg_label', Image, queue_size=1)
        # Sample points on the gripper fingers, used for collision reasoning.
        self.hand_finger_point = np.array([ [ 0., 0., 0. , -0. , 0. , -0. ],
                                            [ 0., 0., 0.053, -0.053, 0.053, -0.053],
                                            [ 0., 0., 0.075, 0.075, 0.105, 0.105]])
        # Hard-coded joint configurations above the drop bin (used when PUT_BIN).
        self.bin_conf_1 = np.array([0.7074745589850109, 0.361727706885124, 0.38521270434333,
                                    -1.1754794559646125, -0.4169872830046795, 1.7096866963969337, 1.654512471818922]).astype(np.float32)
        self.bin_conf_2 = np.array([0.5919747534674433, 0.7818432665691674, 0.557417382701195,
                                    -1.1647884021323738, -0.39191044586242046, 1.837464805311654, 1.9150514982533562]).astype(np.float32)
        if cfg.ROS_CAMERA == 'D415':
            # use RealSense D435
            self.base_frame = 'measured/base_link'
            camera_name = 'cam_2'
            rgb_sub = message_filters.Subscriber('/%s/color/image_raw' % camera_name, Image, queue_size=1)
            depth_sub = message_filters.Subscriber('/%s/aligned_depth_to_color/image_raw' % camera_name, Image, queue_size=1)
            msg = rospy.wait_for_message('/%s/color/camera_info' % camera_name, CameraInfo)
            self.camera_frame = 'measured/camera_color_optical_frame'
            self.target_frame = self.base_frame
        elif cfg.ROS_CAMERA == 'Azure':
            self.base_frame = 'measured/base_link'
            rgb_sub = message_filters.Subscriber('/k4a/rgb/image_raw', Image, queue_size=1)
            depth_sub = message_filters.Subscriber('/k4a/depth_to_rgb/image_raw', Image, queue_size=1)
            msg = rospy.wait_for_message('/k4a/rgb/camera_info', CameraInfo)
            self.camera_frame = 'rgb_camera_link'
            self.target_frame = self.base_frame
        else:
            # use kinect
            self.base_frame = '%s_rgb_optical_frame' % (cfg.ROS_CAMERA)
            rgb_sub = message_filters.Subscriber('/%s/rgb/image_color' % (cfg.ROS_CAMERA), Image, queue_size=1)
            depth_sub = message_filters.Subscriber('/%s/depth_registered/image' % (cfg.ROS_CAMERA), Image, queue_size=1)
            msg = rospy.wait_for_message('/%s/rgb/camera_info' % (cfg.ROS_CAMERA), CameraInfo)
            self.camera_frame = '%s_rgb_optical_frame' % (cfg.ROS_CAMERA)
            self.target_frame = self.base_frame
        # update camera intrinsics
        intrinsics = np.array(msg.K).reshape(3, 3)
        self.fx = intrinsics[0, 0]
        self.fy = intrinsics[1, 1]
        self.px = intrinsics[0, 2]
        self.py = intrinsics[1, 2]
        print(intrinsics)
        # Synchronize RGB, depth and segmentation label streams.
        queue_size = 1
        slop_seconds = 0.4
        ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub, self.label_sub], queue_size, slop_seconds)
        ts.registerCallback(self.callback_rgbdm)
        # set global intrinsics and extrinsics
        global INTRINSICS, EXTRINSICS
        INTRINSICS = intrinsics
        EXTRINSICS = np.zeros([4, 4])# from camera to end effector
        EXTRINSICS[:3, 3] = (np.array([0.05253322227958818, -0.05414890498307623, 0.06035263861136299])) # camera offset
        EXTRINSICS[:3, :3] = quat2mat([0.7182116422267757, 0.016333297635292354, 0.010996322012974747, 0.6955460741463947])
        self.remaining_step = cfg.RL_MAX_STEP
        # start publishing thread
        self.start_publishing_tf()
        self.planner = EnvPlanner()
        self.expert_plan = []
        self.standoff_idx = -1
        self.has_plan = False
        self.num_trial = 0
        # threshold to close gripper
        self.grasp_score_threshold = 0.4
def compute_plan_with_gaddpg(self, state, ef_pose, vis=False):
    """Generate the initial expert plan from GA-DDPG sampled grasp views.

    Args:
        state: observation tuple; state[0][0] is the point cloud handed to the planner.
        ef_pose: current end-effector pose.
        vis: if True, visualize collision checks and the planned trajectory.

    Side effects:
        Sets self.expert_plan and self.standoff_idx.
    """
    current_joints = get_joints(self.joint_listener)
    # Sample grasps by simulating views with the GA-DDPG agent.
    gaddpg_grasps_from_simulate_view(self.agent, state, cfg.RL_MAX_STEP, ef_pose)
    print('finish simulate views')
    # Remaining timesteps can be used to replan; vis shows collision and traj.
    plan, standoff = self.planner.expert_plan(
        cfg.RL_MAX_STEP, current_joints, ef_pose, state[0][0], vis=vis)
    self.expert_plan = plan
    self.standoff_idx = standoff
    print('expert plan', self.expert_plan.shape)
    print('standoff idx', self.standoff_idx)
def start_publishing_tf(self):
    """Spawn the background thread that publishes accumulated point clouds.

    The stop_event lets publish_point_cloud exit its loop cleanly on shutdown.
    """
    self.stop_event = threading.Event()
    worker = threading.Thread(target=self.publish_point_cloud)
    self.tf_thread = worker
    worker.start()
def publish_point_cloud(self):
    """Publish accumulated target/obstacle point clouds until stopped.

    Splits self.acc_points (4 x N; row 3 holds the label) into a target
    cloud (label 0) and an obstacle cloud (label 1), publishing both at
    30 Hz in the base frame until stop_event is set or ROS shuts down.
    """
    rate = rospy.Rate(30.)
    cloud_fields = [
        PointField('x', 0, PointField.FLOAT32, 1),
        PointField('y', 4, PointField.FLOAT32, 1),
        PointField('z', 8, PointField.FLOAT32, 1),
    ]
    while not self.stop_event.is_set() and not rospy.is_shutdown():
        hdr = std_msgs.msg.Header()
        hdr.stamp = rospy.Time.now()
        hdr.frame_id = self.base_frame
        xyz = self.acc_points[:3, :].T
        labels = self.acc_points[3, :].flatten()
        # label 0 -> target object points
        target_cloud = point_cloud2.create_cloud(hdr, cloud_fields, xyz[labels == 0, :])
        self.object_points2_target_pub.publish(target_cloud)
        # label 1 -> obstacle points
        obstacle_cloud = point_cloud2.create_cloud(hdr, cloud_fields, xyz[labels == 1, :])
        self.object_points2_obstacle_pub.publish(obstacle_cloud)
        rate.sleep()
def callback_rgbdm(self, rgb, depth, mask):
    """Synchronized RGB/depth/mask callback: decode, optionally rescale, cache.

    Converts the depth image to float (meters for 16UC1), decodes the RGB and
    mask images, rescales everything by cfg.SCALES_BASE[0] when it is not 1,
    and stores copies of the frames plus the current end-effector pose under
    the global lock for the processing loop to pick up.
    """
    ef_pose = get_ef_pose(self.pose_listener)
    # Normalize depth to a float image regardless of the wire encoding.
    if depth.encoding == '32FC1':
        depth_m = self.cv_bridge.imgmsg_to_cv2(depth)
    elif depth.encoding == '16UC1':
        depth_m = self.cv_bridge.imgmsg_to_cv2(depth).copy().astype(np.float32)
        depth_m /= 1000.0
    else:
        rospy.logerr_throttle(
            1, 'Unsupported depth type. Expected 16UC1 or 32FC1, got {}'.format(
                depth.encoding))
        return
    color = self.cv_bridge.imgmsg_to_cv2(rgb, 'bgr8')
    seg = self.cv_bridge.imgmsg_to_cv2(mask, 'mono8')
    # Rescale if configured (training used a smaller resolution).
    scale = cfg.SCALES_BASE[0]
    if scale != 1:
        color = pad_im(cv2.resize(color, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR), 16)
        depth_m = pad_im(cv2.resize(depth_m, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST), 16)
        seg = pad_im(cv2.resize(seg, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST), 16)
    with lock:
        self.im = color.copy()
        self.im_ef_pose = ef_pose.copy()
        self.mask = seg.copy()
        self.depth = depth_m.copy()
        self.rgb_frame_id = rgb.header.frame_id
        self.rgb_frame_stamp = rgb.header.stamp
def show_segmentation_result(self, color, mask, mask_ids):
    """Display the segmentation with numbered objects and ask the user to pick one.

    Each mask id is annotated at its segment centroid with its 1-based index.

    Returns:
        int: the 1-based index entered by the user.
    """
    annotated = color.copy()
    for idx, mask_id in enumerate(mask_ids, start=1):
        ys, xs = np.where(mask == mask_id)
        # Label goes at the segment centroid.
        cx = int(np.mean(xs))
        cy = int(np.mean(ys))
        annotated = cv2.putText(annotated, str(idx), (cx, cy),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.namedWindow("Display 1")
    cv2.imshow("Display 1", annotated)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    choice = input('Please enter which object to pick up: ')
    return int(choice)
def find_target_object(self, depth, mask, mask_ids, ef_pose, remaining_step, vis=False):
# select target points
target_mask = get_target_mask(self.acc_points)
points = self.acc_points[:3, target_mask]
# sample points
points = regularize_pc_point_count(points.T, 1024, use_farthest_point=True).T
# base to hand
points = se3_transform_pc(se3_inverse(ef_pose), points)
# hand to camera
offset_pose = se3_inverse(EXTRINSICS)
xyz_points = offset_pose[:3, :3].dot(points) + offset_pose[:3, [3]]
# projection to image
p_xyz = INTRINSICS.dot(xyz_points)
index = p_xyz[2] > 0.03
p_xyz = p_xyz[:, index]
xyz_points = xyz_points[:, index]
x, y = (p_xyz[0] / p_xyz[2]).astype(np.int), (p_xyz[1] / p_xyz[2]).astype(np.int)
# bounding box
x1 = np.min(x)
x2 = np.max(x)
y1 = np.min(y)
y2 = np.max(y)
area = (x2 - x1 + 1) * (y2 - y1 + 1)
# check labels
valid_idx_mask = (x > 0) * (x < mask.shape[1] - 1) * (y > 0) * (y < mask.shape[0] - 1)
labels = mask[y[valid_idx_mask], x[valid_idx_mask]]
labels_nonzero = labels[labels > 0]
xyz_points = xyz_points[:, valid_idx_mask]
# find the marjority label
if float(len(labels_nonzero)) / float((len(labels) + 1)) < 0.5:
print('overlap to background')
target_id = -1
else:
target_id = np.bincount(labels_nonzero).argmax()
# check bounding box overlap
I = np.where(mask == target_id)
x11 = np.min(I[1])
x22 = np.max(I[1])
y11 = np.min(I[0])
y22 = np.max(I[0])
area1 = (x22 - x11 + 1) * (y22 - y11 + 1)
xx1 = np.maximum(x1, x11)
yy1 = np.maximum(y1, y11)
xx2 = np.minimum(x2, x22)
yy2 = np.minimum(y2, y22)
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr | |
V, with placeholder values added for new beliefs
B, updated with new beliefs
"""
n_S = np.shape(R)[0]
n_A = np.shape(R)[1]
n_B = np.shape(B)[0]
if eps is None:
eps = 0.01*n_S
if n_B > 250:
print("already have over 250 belief points. careful! not expanding more...")
O_dims = np.shape(O)[1]
O_means = O[0]; O_sds = O[1]
#### no reason to resample O each iteration; so sample obs beforehand and cache
O_samps = np.random.normal(0,1,(n_samps,O_dims,n_S,n_A))
O_samps = O_means + O_sds*O_samps
#precompute and cache b^ao for sampled observations...
O_logprob = np.sum(stat.norm.logpdf(O_samps[:,:,:,:,None],
np.transpose(O_means,[0,2,1])[:,None,:,:],
np.transpose(O_sds,[0,2,1])[:,None,:,:],),1)
log_B = np.log(B+1e-16) # B x S
log_T = np.log(T+1e-16) #S' x S x A
log_TB = logsumexp(log_B[None,:,:,None] + log_T[:,None,:,:],2)# S' x S
log_bao = np.transpose(O_logprob[:,:,None,:,:] + log_TB[None,:,:,:,None],[2,0,3,1,4])
b_ao = np.exp(log_bao - logsumexp(log_bao,4)[:,:,:,:,None]) #B x K x A x S' x S
#innermost bit is size B x B' x k x A x S' x S
b_ao_alldiffs = np.transpose(np.einsum('abcdef->abcde',np.abs(b_ao[:,None,:] - B[None,:,None,None,None,:])),[0,2,3,4,1])
b_ao_diffs = np.min(b_ao_alldiffs,4) #B x k x A x S': how far this belief expansion is from existing B
#get all beliefs that were max distance away
tmp_max = np.max(b_ao_diffs,axis=(1,2,3))
inds = tmp_max>eps #which expansions from original beliefs were decently far away
inds2 = np.isclose(b_ao_diffs[inds,:,:,:],tmp_max[inds,None,None,None],atol=1e-6,rtol=1e-6) #get everything that was close
new_B = b_ao[inds][inds2,:] #relaxed inds2 a bit to let more in to check...
if np.shape(new_B)[0]>0:
### should do some sort of check in new_B for redundancy...it's ok
# if we accept several new beliefs if they're all close to the max
# but in different directions...not great if they're all still close by
new_nB = np.shape(new_B)[0]
#B x B pairwise differences between vectors
# diffs = np.reshape(np.sum(np.abs(np.tile(new_B,(new_nB,1))-np.repeat(new_B,new_nB,axis=0)),1),(new_nB,new_nB))
#
# diffs2 = np.zeros((new_nB,new_nB))
# for i in range(new_nB):
# for ii in range(new_nB):
# diffs2[i,ii] = np.sum(np.abs(new_B[i,:]-new_B[ii,:]))
#
# diff_inds = diffs>eps
#TODO: more efficient way?
keep_inds = [0]
for i in range(1,new_nB):
diffs = np.sum(np.abs(new_B[keep_inds,:] - new_B[i,:]),1)
if np.all(diffs>eps):
keep_inds.append(i)
new_B = new_B[keep_inds,:]
B = np.vstack([B,new_B])
#add values to existing value function
V[0] = np.vstack([V[0],V_min*np.ones(np.shape(new_B))])
n_B = np.shape(B)[0]
V[1] = -1*np.ones(n_B)
return V,B
def pbvi(T,O,R,gamma,B=None,V=None,max_iter=100,eps=.1,verbose=False,n_B_steps=3,
         max_V_iters=100):
    """
    Main point-based value iteration (PBVI) function.

    Alternates between a value-backup step over the current belief set
    (update_V_softmax) and a belief-expansion step (expand_B) that grows B,
    stopping after the update step of iteration max_iter so that every belief
    in B has a backed-up value.

    inputs:
        T: transition model
        O: observation model (means, sds)
        R: reward array, n_S x n_A
        gamma: discount factor
        B: n_B x n_S array with initial belief set. if none, use pre-chosen one
    optional inputs:
        V: in case we're doing a warm start from a previous iteration
        max_iter: number of outer update/expand iterations before quitting
        eps: intended convergence tolerance on successive value functions
        n_B_steps: number of expand_B calls per outer iteration
        max_V_iters: inner iteration cap passed to update_V_softmax
    outputs:
        V: [alpha vectors (n_B x n_S), corresponding actions (n_B)]
        B: final belief set
    """
    n_S = np.shape(R)[0]
    if B is None:
        # Default belief set: one uniform belief plus, for each state, a belief
        # concentrated (0.99) on that state.
        high = .99
        B = (1-high)/(n_S-1)*np.ones((n_S+1,n_S))
        B[0,:] = 1/n_S
        for i in range(n_S):
            B[i+1,i] = high
    n_B = np.shape(B)[0]
    if verbose:
        print("starting with %d beliefs..." %n_B)
        sys.stdout.flush()
    # Lower bound on achievable discounted value; initializes alpha vectors.
    V_min = np.min(R)/(1-gamma)
    if V is None:
        #init value function: V[0] is array of alpha vectors, V[1] is corresponding action
        V = [V_min*np.ones((n_B,n_S)),-1*np.ones(n_B)]
    V_maxdiff = np.inf #difference between successive value functions (alpha vectors)
    n_iter = 0
    # NOTE(review): V_maxdiff is never updated inside the loop, so the eps-based
    # convergence test below can never trigger; the loop always exits via the
    # n_iter == max_iter break. This matches the in-loop TODO about the stopping
    # criteria — confirm whether eps-based early stopping was ever intended.
    while V_maxdiff > eps: #loop until V has converged or hit max_iter
        if verbose:
            print("iter %d" %n_iter)
            sys.stdout.flush()
        #####
        ##### improvement step
        #####
        if verbose:
            print("updating beliefs...")
            sys.stdout.flush()
        V = update_V_softmax(V,B,T,O,R,gamma,max_iter=max_V_iters)
        ##TODO: what is the stopping criteria here??
        #want to end after an update step and not an expand step...
        #after expand step we have a bunch of extra beliefs without a value
        if n_iter == max_iter:
            if verbose:
                print("updated in last iter, quitting...")
                sys.stdout.flush()
            break
        #####
        ##### expansion step where we add new belief points to set
        #####
        if verbose:
            print("expanding beliefs...")
            sys.stdout.flush()
        for i in range(n_B_steps):
            V,B = expand_B(V,B,T,O,R,V_min,gamma)
        n_B = np.shape(B)[0]
        if verbose:
            print("there are now %d beliefs in B..." %n_B)
            sys.stdout.flush()
        n_iter += 1
    return V,B
################ OLD FUNCS NOT USED ANYMORE (since everything is softmax now)
# def update_V(V,B,T,O,R,gamma,eps=None,max_iter=50,verbose=True,n_samps=100,
# seed=False):
# """
# inputs:
# V (list):
# V[0]: n_B x n_S array of alpha-vector values for each belief
# V[1]: n_B array, denoting which action generated each alpha-vector
# B: n_B x n_S array of belief states to be updated
# optional inputs:
# outputs:
# V (same as input), updated
# """
# if seed: #testing
# np.random.seed(711)
# n_B = np.shape(B)[0]
# n_V = np.shape(B)[0]
# n_A = np.shape(R)[1]
# n_S = np.shape(R)[0]
# O_dims = np.shape(O)[1]
# O_means = O[0]; O_sds = O[1] #O_dims,n_S,n_A
# if eps is None:
# eps = 0.01*n_S
# #### no reason to resample O each iteration; so sample obs beforehand and cache
# O_samps = np.random.normal(0,1,(n_samps,O_dims,n_S,n_A))
# O_samps = O_means + O_sds*O_samps
# #precompute and cache b^ao for sampled observations...
# O_logprob = np.sum(stat.norm.logpdf(O_samps[:,:,:,:,None],
# np.transpose(O_means,[0,2,1])[:,None,:,:],
# np.transpose(O_sds,[0,2,1])[:,None,:,:],),1)
# log_B = np.log(B+1e-16) # B x S
# log_T = np.log(T+1e-16) #S' x S x A
# log_TB = logsumexp(log_B[None,:,:,None] + log_T[:,None,:,:],2)# S' x S
# log_bao = np.transpose(O_logprob[:,:,None,:,:] + log_TB[None,:,:,:,None],[2,0,3,1,4])
# b_ao = np.exp(log_bao - logsumexp(log_bao,4)[:,:,:,:,None]) #B x K x A x S' x S
# # O_prob = np.exp(O_logprob)
# # b_ao = np.einsum('abcd,ef,bfc->eacbd',O_prob,B,T) #B x K x A x S' x S
# # b_ao /= np.einsum('abcde->abcd',b_ao)[:,:,:,:,None]
# ### precompute funky indexing needed...
# v_ = []
# b_ = []
# a_ = []
# for v in range(n_V):
# for b in range(n_B):
# for a in range(n_A):
# v_.append(v)
# b_.append(b)
# a_.append(a)
# v_ = np.reshape(v_,[n_V,n_B,n_A])
# b_ = np.reshape(b_,[n_V,n_B,n_A])
# a_ = np.reshape(a_,[n_V,n_B,n_A])
# for ct in range(max_iter):
# old_V = np.array(V[0],copy=True)
# alpha_bao = np.einsum('ab,cdefb->acdef',V[0],b_ao) #V x B x K x A x S'
# argmax_alpha_bao = np.argmax(alpha_bao,0) #B x K x A x S'
# prob_meta_obs = np.array([np.mean(argmax_alpha_bao==i,axis=1) for i in range(n_V)]) #V x B x A x S'
# alpha_aO_alpha2 = np.einsum('ab,bcd,efdb->efdac',V[0],T,prob_meta_obs) #V' x B x A x V x S
# B_alpha_aO_alpha2 = np.einsum('ab,cadeb->cade',B,alpha_aO_alpha2) #V' x B x A x V
# argmax_aB = np.argmax(B_alpha_aO_alpha2,axis=3) #V' x B x A
# #tricky indexing
# selected_B_alpha_aO_alpha2 = alpha_aO_alpha2[v_,b_,a_,argmax_aB,:] #V' x B x A x S
# alpha_ab = R.T + gamma*np.einsum('abcd->bcd',selected_B_alpha_aO_alpha2) #B x A x S
# alpha_ab_B = np.einsum('ab,acb->ac',B,alpha_ab) #B x A
# argmax_alpha_abB = np.argmax(alpha_ab_B,axis=1) #B
# selected_alpha_abB = alpha_ab[np.arange(n_B),argmax_alpha_abB,:] #B x S; again tricky indexing
# V[0] = selected_alpha_abB
# V[1] = argmax_alpha_abB
# diff = np.sum(np.abs(V[0]-old_V))
# #check for convergence
# if diff < eps:
# return V
# # if verbose:
# # print("didn't converge during update :(" %np.sum(np.abs(V[0]-old_V)))
# return V
# def run_policy(T,O,R,pi,R_sd,T_est=None,O_est=None,R_est=None,belief=None,V=None,steps=1000,
# seed=8675309,tiger_env=False,temp=None):
# """
# runs a policy for a period of time, and records trajectories.
# policy is parameterized by a value function composed of alpha vectors.
# inputs:
# V: if None, use a random policy where we just select random actions.
# Otherwise should be value function as represented in PBVI func
# outputs:
# full trajectories
# """
# rng = np.random.RandomState(seed)
# n_A = np.shape(R)[1]
# n_S = np.shape(R)[0]
# #default values
# if V is None:
# action_probs = 1/n_A*np.ones(n_A)
# if belief is None:
# belief = pi
# # if T_est is None:
# # T_est = T
# # if O_est is None:
# # O_est = O
# # if R_est is None:
# # R_est = R
# if temp is not None:
# assert(temp>0)
# log_T_est = np.log(T_est)
# n_dim = len(O)
# states = []
# beliefs = []
# actions = []
# rewards = []
# observations = []
# #initial state
# state = []
# for s in range(n_S):
# state.append(draw_discrete(pi[s],rng))
# states.append(state)
# #loop & sim trajectory
# for t in range(steps):
# if V is None: #random actions
# action = draw_discrete(action_probs,rng)
# else: #use our learned value function from planning to greedily select optimal action
# if temp is None: #deterministic policy
# b_alpha = np.dot(V[0],belief)
# action = V[1][np.argmax(b_alpha)]
# else: #stochastic policy defined by softmax over alphas w/ temperature
# b_alpha = np.dot(V[0],belief)/temp
# alpha_logprobs = b_alpha-logsumexp(b_alpha)
# alpha = draw_discrete(np.exp(alpha_logprobs))
# action = V[1][alpha]
# state,obs,reward = sim_step_bydim(state,action,T,O,R,rng)
# belief = update_belief(belief,obs,action,log_T_est,O_est)
# states.append(state)
# beliefs.append(belief)
# actions.append(action)
# rewards.append(reward)
# observations.append(obs)
# if tiger_env and action!=n_A-1: #if chose and didn't listen, end for tiger scenarios
# break
# return ( np.array(states),np.array(beliefs),np.array(actions),
# np.array(rewards),np.array(observations) )
if __name__ == "__main__":
np.set_printoptions(threshold=1000)
np.random.seed(111)
# from envs_cts import create_tiger_plus_environment,create_tiger_plus_witness
n_dim=4
sig_good = .2
| |
name (str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Real tensor. The real result of the 2-D Hermitian complex real FFT.
Raises:
ValueError: if `s` not be a sequence of 2 integers or None.
ValueError: if `axes` not be a sequence of 2 integers or None.
ValueError: If the input dimension is smaller than 2.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = (np.array([[3,2,3],[2, 2, 3]]) + 1j * np.array([[3,2,3],[2, 2, 3]])).astype(np.complex128)
xp = paddle.to_tensor(x)
hfft2_xp = paddle.fft.hfft2(xp).numpy()
print(hfft2_xp)
# [[19. 7. 3. -9.]
# [ 1. 1. 1. 1.]]
"""
_check_at_least_ndim(x, 2)
if s is not None:
if not isinstance(s, Sequence) or len(s) != 2:
raise ValueError(
"Invalid FFT argument s ({}), it should be a sequence of 2 integers."
.format(s))
if axes is not None:
if not isinstance(axes, Sequence) or len(axes) != 2:
raise ValueError(
"Invalid FFT argument axes ({}), it should be a sequence of 2 integers."
.format(axes))
return hfftn(x, s, axes, norm, name)
def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
    """
    Compute the two dimensional inverse FFT of a real spectrum.

    This is really `ihfftn` with different defaults.
    For more details see `ihfftn`.

    Args:
        x(Tensor): Input tensor.
        s(Sequence[int], optional): Shape of the real input to the inverse FFT.
        axes(Sequence[int], optional): The axes over which to compute the
            inverse fft. Default is the last two axes.
        norm(str, optional): {"backward", "ortho", "forward"}. Default is
            "backward".
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name` .

    Returns:
        out(Tensor) : The result of the inverse hermitian 2-D FFT.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x = np.mgrid[:5, :5][0].astype(np.float64)
            xp = paddle.to_tensor(x)
            ihfft2_xp = paddle.fft.ihfft2(xp).numpy()
            print(ihfft2_xp)
            # [[ 2. +0.j          0. +0.j          0. +0.j        ]
            #  [-0.5-0.68819096j  0. +0.j          0. +0.j        ]
            #  [-0.5-0.16245985j  0. +0.j          0. +0.j        ]
            #  [-0.5+0.16245985j  0. +0.j          0. +0.j        ]
            #  [-0.5+0.68819096j  0. +0.j          0. +0.j        ]]
    """
    _check_at_least_ndim(x, 2)
    # `s` and `axes`, when provided, must each be a length-2 integer sequence.
    if s is not None and (not isinstance(s, Sequence) or len(s) != 2):
        raise ValueError(
            "Invalid FFT argument s ({}), it should be a sequence of 2 integers."
            .format(s))
    if axes is not None and (not isinstance(axes, Sequence) or len(axes) != 2):
        raise ValueError(
            "Invalid FFT argument axes ({}), it should be a sequence of 2 integers."
            .format(axes))
    return ihfftn(x, s, axes, norm, name)
# public APIs utilities
def fftfreq(n, d=1.0, dtype=None, name=None):
    """
    Return the Discrete Fourier Transform sample frequencies.

    The returned float array `f` contains the frequency bin centers in cycles
    per unit of the sample spacing (with zero at the start). For instance, if
    the sample spacing is in seconds, then the frequency unit is cycles/second.
    Given input length `n` and a sample spacing `d`::

        f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
        f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd

    Args:
        n (int): Window length (number of samples).
        d (scalar, optional): Sample spacing (inverse of the sampling rate). Defaults is 1.
        dtype (str, optional): Data type of the returned tensor. If None, the
            framework's default dtype is used.
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. A tensor of length 'n' containing the sampling frequency.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x = np.array([3, 1, 2, 2, 3], dtype=float)
            scalar_temp = 0.5
            n = x.size
            fftfreq_xp = paddle.fft.fftfreq(n, d=scalar_temp)
            print(fftfreq_xp)
            # Tensor(shape=[5], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #           [ 0.        ,  0.40000001,  0.80000001, -0.80000001, -0.40000001])
    """
    # Fix: honor an explicitly requested dtype. Previously the `dtype`
    # parameter was accepted but unconditionally overwritten with the
    # framework default, silently ignoring the caller's choice.
    if dtype is None:
        dtype = paddle.framework.get_default_dtype()
    val = 1.0 / (n * d)
    # Build [-neg_max, ..., pos_max-1] then roll the negative half to the back
    # so the result starts at 0 and ends with the negative frequencies.
    pos_max = (n + 1) // 2
    neg_max = n // 2
    indices = paddle.arange(-neg_max, pos_max, dtype=dtype, name=name)
    indices = paddle.roll(indices, -neg_max, name=name)
    return indices * val
def rfftfreq(n, d=1.0, dtype=None, name=None):
    """
    Return the Discrete Fourier Transform sample frequencies (for `rfft` output).

    The returned floating-point array "F" contains the center of the frequency unit,
    and the unit is the number of cycles of the sampling interval (the starting point is zero).
    Given input length `n` and a sample spacing `d`::

        f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
        f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd

    the Nyquist frequency component is considered to be positive.

    Args:
        n (int): Window length (number of samples).
        d (scalar, optional): Sample spacing (inverse of the sampling rate). Defaults is 1.
        dtype (str, optional): Data type of the returned tensor. If None, the
            framework's default dtype is used.
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. A tensor of length ``n//2 + 1`` containing the sample frequencies.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x = np.array([3, 1, 2, 2, 3], dtype=float)
            scalar_temp = 0.3
            n = x.size
            rfftfreq_xp = paddle.fft.rfftfreq(n, d=scalar_temp)
            print(rfftfreq_xp)
            # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [0.        , 0.66666669, 1.33333337])
    """
    # Fix: honor an explicitly requested dtype. Previously the `dtype`
    # parameter was accepted but unconditionally overwritten with the
    # framework default, silently ignoring the caller's choice.
    if dtype is None:
        dtype = paddle.framework.get_default_dtype()
    val = 1.0 / (n * d)
    # Only the non-negative half of the spectrum is produced for real input.
    pos_max = 1 + n // 2
    indices = paddle.arange(0, pos_max, dtype=dtype, name=name)
    return indices * val
def fftshift(x, axes=None, name=None):
    """
    Shift the zero-frequency component to the center of the spectrum.

    This function swaps half spaces for all the axes listed (all by default).
    Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.

    Args:
        x (Tensor): The input tensor to shift.
        axes (int|tuple, optional): The axis on which to move. The default is none, which moves all axes.
            Default is None.
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. The shifted tensor.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x = np.array([3, 1, 2, 2, 3], dtype=float)
            n = x.size
            fftfreq_xp = paddle.fft.fftfreq(n, d=0.3)
            res = paddle.fft.fftshift(fftfreq_xp).numpy()
            print(res)
            # [-1.3333334 -0.6666667  0.         0.6666667  1.3333334]
    """
    # paddle.shape gives a runtime shape tensor, so the shift amounts below
    # are computed as tensors as well.
    shape = paddle.shape(x)
    if axes is None:
        # shift all axes: roll each axis by floor(dim/2)
        rank = len(x.shape)
        axes = list(range(0, rank))
        shifts = shape // 2
    elif isinstance(axes, int):
        shifts = shape[axes] // 2
    else:
        # one shift per requested axis
        shifts = paddle.concat([shape[ax] // 2 for ax in axes])
    return paddle.roll(x, shifts, axes, name=name)
def ifftshift(x, axes=None, name=None):
    """
    The inverse of `fftshift`. Although the even length 'x' is the same, the function of the
    odd length 'x' is different. An example.

    Args:
        x (Tensor): The input tensor to shift back.
        axes (int|tuple, optional): The axis on which to move. The default is none, which moves all axes.
            Default is None.
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. The shifted tensor.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x = np.array([3, 1, 2, 2, 3], dtype=float)
            n = x.size
            fftfreq_xp = paddle.fft.fftfreq(n, d=0.3)
            res = paddle.fft.ifftshift(fftfreq_xp).numpy()
            print(res)
            # [ 1.3333334 -1.3333334 -0.6666667  0.         0.6666667]
    """
    shape = paddle.shape(x)
    # NOTE(review): for odd dims the correct inverse shift is -(dim // 2),
    # but `-shape // 2` equals -(dim // 2) only if paddle's `//` on shape
    # tensors truncates toward zero (with true floor division, -5 // 2 == -3,
    # not -2). The docstring example above implies truncation semantics —
    # confirm against the paddle version in use.
    if axes is None:
        # shift all axes
        rank = len(x.shape)
        axes = list(range(0, rank))
        shifts = -shape // 2
    elif isinstance(axes, int):
        shifts = -shape[axes] // 2
    else:
        # one (negative) shift per requested axis
        shifts = paddle.concat([-shape[ax] // 2 for ax in axes])
    return paddle.roll(x, shifts, axes, name=name)
# internal functions
def fft_c2c(x, n, axis, norm, forward, name):
if is_integer(x):
x = paddle.cast(x, _real_to_complex_dtype(paddle.get_default_dtype()))
elif is_floating_point(x):
x = paddle.cast(x, _real_to_complex_dtype(x.dtype))
_check_normalization(norm)
axis = axis if axis is not None else -1
_check_fft_axis(x, axis)
axes = [axis]
axes = _normalize_axes(x, axes)
if n is not None:
_check_fft_n(n)
s = [n]
x = _resize_fft_input(x, s, axes)
op_type = 'fft_c2c'
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
if _non_static_mode():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
| |
: ntuniprot(RecName_Full='Probable phosphatase PSR2'),
'Q07950' : ntuniprot(RecName_Full='Sterol esterase 2'),
'Q07951' : ntuniprot(RecName_Full='Proteasome chaperone 3'),
'Q07953' : ntuniprot(RecName_Full='Ribosome maturation protein SDO1'),
'Q07959' : ntuniprot(RecName_Full='ADIPOR-like receptor IZH3'),
'Q07963' : ntuniprot(RecName_Full='E3 ubiquitin-protein ligase UBR2'),
'Q07967' : ntuniprot(RecName_Full='Putative uncharacterized protein YLR030W'),
'Q07978' : ntuniprot(RecName_Full='Putative uncharacterized protein YLR031W'),
'Q07979' : ntuniprot(RecName_Full='Chromatin structure-remodeling complex protein RSC58'),
'Q07980' : ntuniprot(RecName_Full='DNA mismatch repair protein MLH2'),
'Q07986' : ntuniprot(RecName_Full='Uncharacterized protein YLR036C'),
'Q07987' : ntuniprot(RecName_Full='Seripauperin-23'),
'Q07988' : ntuniprot(RecName_Full='Cell wall protein YLR040C'),
'Q07990' : ntuniprot(RecName_Full='Cell wall protein YLR042C'),
'Q07993' : ntuniprot(RecName_Full='D-xylulose reductase'),
'Q08001' : ntuniprot(RecName_Full='Membrane-anchored lipid-binding protein LAM6 {ECO:0000303|PubMed:26001273}'),
'Q08003' : ntuniprot(RecName_Full='Regulator of free ubiquitin chains 1'),
'Q08004' : ntuniprot(RecName_Full='Bud site selection protein 20'),
'Q08023' : ntuniprot(RecName_Full='Protein FMP25, mitochondrial'),
'Q08032' : ntuniprot(RecName_Full='Cell division control protein 45'),
'Q08045' : ntuniprot(RecName_Full='Long chronological lifespan protein 2'),
'Q08054' : ntuniprot(RecName_Full='Chitin synthase 3 complex protein CSI2'),
'Q08058' : ntuniprot(RecName_Full='Coenzyme Q-binding protein COQ10, mitochondrial'),
'Q08096' : ntuniprot(RecName_Full="RNA 3'-terminal phosphate cyclase-like protein"),
'Q08108' : ntuniprot(RecName_Full='Lysophospholipase 3'),
'Q08109' : ntuniprot(RecName_Full='ERAD-associated E3 ubiquitin-protein ligase HRD1'),
'Q08110' : ntuniprot(RecName_Full='Putative uncharacterized protein YOL014W'),
'Q08118' : ntuniprot(RecName_Full='Uncharacterized protein IRC10'),
'Q08119' : ntuniprot(RecName_Full='Protein ESC8'),
'Q08144' : ntuniprot(RecName_Full='t-SNARE affecting a late Golgi compartment protein 2'),
'Q08157' : ntuniprot(RecName_Full='Uncharacterized membrane protein YOL019W'),
'Q08162' : ntuniprot(RecName_Full='Exosome complex exonuclease DIS3'),
'Q08172' : ntuniprot(RecName_Full='Putative uncharacterized protein YOL024W'),
'Q08176' : ntuniprot(RecName_Full='Mitochondrial import protein 1'),
'Q08179' : ntuniprot(RecName_Full='Mitochondrial distribution and morphology protein 38 {ECO:0000303|PubMed:11907266}'),
'Q08182' : ntuniprot(RecName_Full='AP-1-like transcription factor YAP7'),
'Q08187' : ntuniprot(RecName_Full='Uncharacterized protein YOL029C'),
'Q08193' : ntuniprot(RecName_Full='1,3-beta-glucanosyltransferase GAS5'),
'Q08199' : ntuniprot(RecName_Full='Nucleotide exchange factor SIL1'),
'Q08202' : ntuniprot(RecName_Full='Protein OPI10'),
'Q08204' : ntuniprot(RecName_Full='Structural maintenance of chromosomes protein 5'),
'Q08206' : ntuniprot(RecName_Full='Uncharacterized protein YOL036W'),
'Q08208' : ntuniprot(RecName_Full='Nucleolar protein 12'),
'Q08213' : ntuniprot(RecName_Full='RNA exonuclease NGL1'),
'Q08214' : ntuniprot(RecName_Full='Endonuclease III homolog 2 {ECO:0000255|HAMAP-Rule:MF_03183}'),
'Q08215' : ntuniprot(RecName_Full='Peroxisomal membrane protein PEX15'),
'Q08217' : ntuniprot(RecName_Full='Serine/threonine-protein kinase PSK2'),
'Q08218' : ntuniprot(RecName_Full='Outer spore wall protein LDS2 {ECO:0000305|PubMed:23966878}'),
'Q08219' : ntuniprot(RecName_Full='Outer spore wall protein RRT8 {ECO:0000305|PubMed:23966878}'),
'Q08220' : ntuniprot(RecName_Full='Glutathione synthetase'),
'Q08223' : ntuniprot(RecName_Full='Altered inheritance of mitochondria protein 39, mitochondrial'),
'Q08224' : ntuniprot(RecName_Full='Hydroxymethylpyrimidine/phosphomethylpyrimidine kinase THI20'),
'Q08225' : ntuniprot(RecName_Full='Probable dipeptidyl peptidase 3'),
'Q08226' : ntuniprot(RecName_Full='Protein CRT10'),
'Q08227' : ntuniprot(RecName_Full='Phosphatidylinositol 4,5-bisphosphate 5-phosphatase INP54'),
'Q08229' : ntuniprot(RecName_Full='Protein NBA1'),
'Q08230' : ntuniprot(RecName_Full='Succinate dehydrogenase assembly factor 2, mitochondrial {ECO:0000255|HAMAP-Rule:MF_03057, ECO:0000303|PubMed:23062074}'),
'Q08231' : ntuniprot(RecName_Full='Nuclear mRNA export protein THP1'),
'Q08232' : ntuniprot(RecName_Full='Uncharacterized membrane protein YOL073C'),
'Q08234' : ntuniprot(RecName_Full='Uncharacterized ABC transporter ATP-binding protein/permease YOL075C'),
'Q08235' : ntuniprot(RecName_Full='Ribosome biogenesis protein BRX1'),
'Q08236' : ntuniprot(RecName_Full='Target of rapamycin complex 2 subunit AVO1'),
'Q08237' : ntuniprot(RecName_Full='RNA exonuclease 4'),
'Q08245' : ntuniprot(RecName_Full='Protein ZEO1'),
'Q08268' : ntuniprot(RecName_Full='Probable transporter MCH4'),
'Q08269' : ntuniprot(RecName_Full='Magnesium transporter ALR1'),
'Q08270' : ntuniprot(RecName_Full='Uncharacterized protein YOL131W'),
'Q08271' : ntuniprot(RecName_Full='1,3-beta-glucanosyltransferase GAS4'),
'Q08273' : ntuniprot(RecName_Full='RING-box protein HRT1'),
'Q08278' : ntuniprot(RecName_Full='Mediator of RNA polymerase II transcription subunit 7'),
'Q08280' : ntuniprot(RecName_Full='Bypass of stop codon protein 6'),
'Q08281' : ntuniprot(RecName_Full='Restriction of telomere capping protein 1'),
'Q08282' : ntuniprot(RecName_Full='tRNA wybutosine-synthesizing protein 4'),
'Q08285' : ntuniprot(RecName_Full='Exosome complex component RRP40'),
'Q08287' : ntuniprot(RecName_Full='60S ribosome subunit biogenesis protein NOP8'),
'Q08295' : ntuniprot(RecName_Full='Oligo-1,6-glucosidase IMA2'),
'Q08299' : ntuniprot(RecName_Full='Siderophore iron transporter ENB1'),
'Q08300' : ntuniprot(RecName_Full='Uncharacterized protein YOL159C'),
'Q08322' : ntuniprot(RecName_Full='Seripauperin-20'),
'Q08347' : ntuniprot(RecName_Full='Alkyl/aryl-sulfatase BDS1 {ECO:0000303|PubMed:15947202}'),
'Q08361' : ntuniprot(RecName_Full='Putative aryl-alcohol dehydrogenase AAD15'),
'Q08387' : ntuniprot(RecName_Full='DNA ligase 4'),
'Q08409' : ntuniprot(RecName_Full='ATP-dependent permease AUS1'),
'Q08412' : ntuniprot(RecName_Full='Ubiquitin-binding protein CUE5'),
'Q08416' : ntuniprot(RecName_Full='Increased recombination centers protein 23'),
'Q08417' : ntuniprot(RecName_Full='Sphingoid long-chain base transporter RSB1'),
'Q08421' : ntuniprot(RecName_Full='Enhancer of translation termination 1'),
'Q08422' : ntuniprot(RecName_Full='AN1-type zinc finger protein TMC1 {ECO:0000305}'),
'Q08438' : ntuniprot(RecName_Full='Phosphopantothenoylcysteine decarboxylase subunit VHS3'),
'Q08444' : ntuniprot(RecName_Full='20S-pre-rRNA D-site endonuclease NOB1'),
'Q08446' : ntuniprot(RecName_Full='Protein SGT1 {ECO:0000303|PubMed:10445024, ECO:0000303|PubMed:12456005}'),
'Q08448' : ntuniprot(RecName_Full='Putative lipase YOR059C'),
'Q08457' : ntuniprot(RecName_Full='Mitochondrial morphogenesis protein SLD7'),
'Q08465' : ntuniprot(RecName_Full='Protein YNG1'),
'Q08471' : ntuniprot(RecName_Full='G1-specific transcription factors activator MSA1'),
'Q08474' : ntuniprot(RecName_Full='Vacuolar morphogenesis protein 10'),
'Q08484' : ntuniprot(RecName_Full='GTPase-activating protein GYP1'),
'Q08485' : ntuniprot(RecName_Full='Nicotinamide riboside transporter 1'),
'Q08490' : ntuniprot(RecName_Full='Shugoshin'),
'Q08491' : ntuniprot(RecName_Full='Superkiller protein 7'),
'Q08492' : ntuniprot(RecName_Full='Bud site selection protein 21'),
'Q08496' : ntuniprot(RecName_Full='Protein DIA2'),
'Q08504' : ntuniprot(RecName_Full='Uncharacterized protein YOR105W'),
'Q08548' : ntuniprot(RecName_Full='Lysophospholipid acyltransferase'),
'Q08550' : ntuniprot(RecName_Full='Meiotic plaque component protein 54'),
'Q08553' : ntuniprot(RecName_Full='Protein SYC1'),
'Q08558' : ntuniprot(RecName_Full='Delta(3,5)-Delta(2,4)-dienoyl-CoA isomerase'),
'Q08559' : ntuniprot(RecName_Full='Protein FYV12'),
'Q08560' : ntuniprot(RecName_Full='Putative uncharacterized protein YOR186W'),
'Q08561' : ntuniprot(RecName_Full='Ino eighty subunit 4'),
'Q08562' : ntuniprot(RecName_Full='ATP-dependent helicase ULS1'),
'Q08579' : ntuniprot(RecName_Full='Thiamine transporter THI72'),
'Q08580' : ntuniprot(RecName_Full='Peroxisomal membrane protein PEX27'),
'Q08581' : ntuniprot(RecName_Full='Kinetochore protein SLK19'),
'Q08601' : ntuniprot(RecName_Full='Metacaspase-1'),
'Q08622' : ntuniprot(RecName_Full='Genetic interactor of prohibitins 3, mitochondrial'),
'Q08634' : ntuniprot(RecName_Full='Uncharacterized protein YOR238W'),
'Q08641' : ntuniprot(RecName_Full='tRNA(Thr) (cytosine(32)-N(3))-methyltransferase'),
'Q08645' : ntuniprot(RecName_Full='Folylpolyglutamate synthase'),
'Q08646' : ntuniprot(RecName_Full='Sporulation-specific protein 2'),
'Q08647' : ntuniprot(RecName_Full='Multisubstrate pseudouridine synthase 7'),
'Q08649' : ntuniprot(RecName_Full='Histone acetyltransferase ESA1'),
'Q08650' : ntuniprot(RecName_Full='Diacylglycerol O-acyltransferase 1'),
'Q08651' : ntuniprot(RecName_Full='Probable oxidoreductase ENV9'),
'Q08673' : ntuniprot(RecName_Full='Cell wall protein SRL1'),
'Q08683' : ntuniprot(RecName_Full='Anaphase-promoting complex subunit 5'),
'Q08685' : ntuniprot(RecName_Full='mRNA cleavage and polyadenylation factor CLP1 {ECO:0000255|HAMAP-Rule:MF_03035}'),
'Q08686' : ntuniprot(RecName_Full='Thiosulfate sulfurtransferase TUM1'),
'Q08687' : ntuniprot(RecName_Full='Translation machinery-associated protein 16'),
'Q08689' : ntuniprot(RecName_Full='N-alpha-acetyltransferase NAT5 {ECO:0000305}'),
'Q08692' : ntuniprot(RecName_Full='Outer spore wall protein 1'),
'Q08693' : ntuniprot(RecName_Full='Putative zinc metalloprotease TRE2'),
'Q08702' : ntuniprot(RecName_Full='Aprataxin-like protein'),
'Q08723' : ntuniprot(RecName_Full='26S proteasome regulatory subunit RPN8'),
'Q08726' : ntuniprot(RecName_Full='GPN-loop GTPase 2 {ECO:0000303|PubMed:21532343}'),
'Q08729' : ntuniprot(RecName_Full='Protein DSE3'),
'Q08732' : ntuniprot(RecName_Full='Serine/threonine-protein kinase HRK1'),
'Q08734' : ntuniprot(RecName_Full='Uncharacterized protein YOR268C'),
'Q08742' : ntuniprot(RecName_Full='Thiosulfate sulfurtransferase RDL2, mitochondrial'),
'Q08743' : ntuniprot(RecName_Full='Vacuolar membrane protein YOR292C'),
'Q08745' : ntuniprot(RecName_Full='40S ribosomal protein S10-A {ECO:0000303|PubMed:9559554}'),
'Q08746' : ntuniprot(RecName_Full='Regulator of ribosome biosynthesis'),
'Q08747' : ntuniprot(RecName_Full='Upstream activation factor subunit UAF30'),
'Q08748' : ntuniprot(RecName_Full='Uncharacterized protein YOR296W'),
'Q08749' : ntuniprot(RecName_Full='Mitochondrial import inner membrane translocase subunit TIM18'),
'Q08750' : ntuniprot(RecName_Full='Protein MUM3'),
'Q08754' : ntuniprot(RecName_Full='Bud site selection protein 7'),
'Q08760' : ntuniprot(RecName_Full='Bud site selection protein RAX1'),
'Q08773' : ntuniprot(RecName_Full='ISWI chromatin-remodeling complex ATPase ISW2'),
'Q08774' : ntuniprot(RecName_Full='Required for respiratory growth protein 7, mitochondrial'),
'Q08777' : ntuniprot(RecName_Full='Riboflavin transporter MCH5'),
'Q08816' : ntuniprot(RecName_Full='Uncharacterized protein YOR352W'),
'Q08817' : ntuniprot(RecName_Full='Leucine-rich repeat-containing protein SOG2'),
'Q08818' : ntuniprot(RecName_Full='Meiotic sister-chromatid recombination protein 6, mitochondrial'),
'Q08822' : ntuniprot(RecName_Full='Probable electron transfer flavoprotein-ubiquinone oxidoreductase, mitochondrial'),
'Q08826' : ntuniprot(RecName_Full='Sorting nexin-3'),
'Q08831' : ntuniprot(RecName_Full='Protein VTS1'),
'Q08844' : ntuniprot(RecName_Full='Uncharacterized membrane protein YOR365C'),
'Q08873' : ntuniprot(RecName_Full='Transgelin'),
'Q08886' : ntuniprot(RecName_Full='Guanine nucleotide-binding protein subunit beta 1'),
'Q08887' : ntuniprot(RecName_Full='Nuclear division defective protein 1'),
'Q08902' : ntuniprot(RecName_Full='Drug resistance protein YOR378W'),
'Q08904' : ntuniprot(RecName_Full='Protein RDR1'),
'Q08905' : ntuniprot(RecName_Full='Ferric reductase transmembrane component 3'),
'Q08906' : ntuniprot(RecName_Full='Facilitator of iron transport 2'),
'Q08907' : ntuniprot(RecName_Full='Facilitator of iron transport 3'),
'Q08908' : ntuniprot(RecName_Full='Ferric reductase transmembrane component 5'),
'Q08909' : ntuniprot(RecName_Full='Uncharacterized protein YOR385W'),
'Q08910' : ntuniprot(RecName_Full='VEL1-related protein YOR387C'),
'Q08911' : ntuniprot(RecName_Full='Formate dehydrogenase 1 {ECO:0000255|HAMAP-Rule:MF_03210, ECO:0000303|PubMed:9178506}'),
'Q08912' : ntuniprot(RecName_Full='Uncharacterized protein YOR389W'),
'Q08913' : ntuniprot(RecName_Full='Fluoride export protein 1 {ECO:0000303|PubMed:24173035}'),
'Q08914' : ntuniprot(RecName_Full='Probable glutathione-independent glyoxalase HSP33 {ECO:0000250|UniProtKB:Q04432}'),
'Q08919' : ntuniprot(RecName_Full='Uncharacterized protein TRE1'),
'Q08920' : ntuniprot(RecName_Full='Nuclear cap-binding protein subunit 2'),
'Q08921' : ntuniprot(RecName_Full='Target of rapamycin complex 1 subunit TCO89'),
'Q08923' : ntuniprot(RecName_Full='Histone deacetylase complex subunit CTI6'),
'Q08924' : ntuniprot(RecName_Full='Regulator of Ty1 transposition protein 10'),
'Q08925' : ntuniprot(RecName_Full='RNA-binding protein MRN1'),
'Q08926' : ntuniprot(RecName_Full='ULP1-interacting protein 4'),
'Q08929' : ntuniprot(RecName_Full='Glycerol uptake protein 2'),
'Q08930' : ntuniprot(RecName_Full='Ubiquitin carboxyl-terminal hydrolase MIY1 {ECO:0000303|PubMed:27292798}'),
'Q08931' : ntuniprot(RecName_Full='Pheromone-regulated membrane protein 3'),
'Q08932' : ntuniprot(RecName_Full='Ribosome assembly 1 protein'),
'Q08949' : ntuniprot(RecName_Full='DNA damage checkpoint protein 1'),
'Q08951' : ntuniprot(RecName_Full='AP-3 complex subunit delta'),
'Q08952' : ntuniprot(RecName_Full='Oxidation resistance protein 1'),
'Q08954' : ntuniprot(RecName_Full='Smr domain-containing protein YPL199C'),
'Q08955' : ntuniprot(RecName_Full='Chromosome segregation in meiosis protein 4'),
'Q08956' : ntuniprot(RecName_Full='Protein YIG1'),
'Q08957' : ntuniprot(RecName_Full='Iron-regulated transcriptional activator AFT2'),
'Q08959' : ntuniprot(RecName_Full='Phosphatidylglycerol phospholipase C'),
'Q08960' : ntuniprot(RecName_Full='S-adenosyl-L-methionine-dependent tRNA 4-demethylwyosine synthase'),
'Q08961' : ntuniprot(RecName_Full='Ribosomal lysine N-methyltransferase 1 {ECO:0000303|PubMed:16096273}'),
'Q08962' : ntuniprot(RecName_Full='60S ribosome subunit biogenesis protein NIP7'),
'Q08963' : ntuniprot(RecName_Full="U2 small nuclear ribonucleoprotein A'"),
'Q08964' : ntuniprot(RecName_Full='Putative ISWI chromatin-remodeling complex subunit YPL216W'),
'Q08965' : ntuniprot(RecName_Full='Ribosome biogenesis protein BMS1'),
'Q08966' : ntuniprot(RecName_Full='PHO85 cyclin-8'),
'Q08967' : ntuniprot(RecName_Full='Flavin carrier protein 1'),
'Q08968' : ntuniprot(RecName_Full='Protein adenylyltransferase SelO, mitochondrial {ECO:0000305}'),
'Q08969' : ntuniprot(RecName_Full='Protein GRE1'),
'Q08970' : ntuniprot(RecName_Full='Mitochondrial metal transporter 2'),
'Q08971' : ntuniprot(RecName_Full='Protein PBDC1 homolog'),
'Q08972' : ntuniprot(RecName_Full='[NU+] prion formation protein 1'),
'Q08974' : ntuniprot(RecName_Full='Uncharacterized membrane protein YPL257W'),
'Q08975' : ntuniprot(RecName_Full='Hydroxymethylpyrimidine/phosphomethylpyrimidine kinase THI21'),
'Q08977' : ntuniprot(RecName_Full='UPF0662 protein YPL260W'),
'Q08979' : ntuniprot(RecName_Full='Kelch repeat-containing protein 3'),
'Q08980' : ntuniprot(RecName_Full='Probable transport protein YPL264C'),
'Q08981' : ntuniprot(RecName_Full='APC/C-CDH1 modulator 1'),
'Q08984' : ntuniprot(RecName_Full='Uncharacterized protein YPL272C'),
'Q08985' : ntuniprot(RecName_Full='Homocysteine S-methyltransferase 2'),
'Q08986' : ntuniprot(RecName_Full='S-adenosylmethionine permease SAM3'),
'Q08989' : ntuniprot(RecName_Full='Uncharacterized protein YPL277C'),
'Q08990' : ntuniprot(RecName_Full='Putative uncharacterized protein YPL278C'),
'Q08991' : ntuniprot(RecName_Full='Fluoride export | |
person.classRank = 4
person.yearOfSchoolLeft = 0
person.classRank = person.temporaryClassRank
self.enterWorkForce(person)
if person.house == self.displayHouse:
self.textUpdateList.append(str(self.year) + ": #" + str(person.id) + " is now looking for a job.")
if (person.status == 'student' or person.status == 'outOfTownStudent') and len([x for x in person.house.occupants if x.independentStatus == True]) == 0:
person.independentStatus = True
self.enterWorkForce(person)
if person.house == self.displayHouse:
self.textUpdateList.append(str(self.year) + ": #" + str(person.id) + " is now looking for a job.")
def transitionProb (self, person, stage):
    """Return the probability that `person` keeps studying into education `stage`.

    Combines an income effect (how affordable studying is for the household),
    an education effect (parents' class rank relative to the stage) and a care
    effect (informal care work the person already performs).  Returns 0 when
    both parents are dead or the household has no positive disposable income.
    """
    household = [x for x in person.house.occupants]
    # Only compute a study probability while at least one parent is alive.
    if person.father.dead + person.mother.dead != 2:
        pStudy = 0
        disposableIncome = 0  # NOTE(review): assigned but never used in this method
        # Household disposable income shared equally among all occupants.
        perCapitaDisposableIncome = self.computeDisposableIncome(household)/float(len(household))
        # print('Per Capita Disposable Income: ' + str(perCapitaDisposableIncome))
        if perCapitaDisposableIncome > 0.0:
            # Weekly salary forgone by studying at this stage instead of working.
            forgoneSalary = self.p['incomeInitialLevels'][stage]*self.p['weeklyHours']
            educationCosts = self.p['educationCosts'][stage]
            # relCost = (forgoneSalary+educationCosts)/perCapitaDisposableIncome
            # Relative cost of studying: forgone salary as a share of per-capita income.
            relCost = forgoneSalary/perCapitaDisposableIncome
            # Check variable
            if self.year == self.p['getCheckVariablesAtYear']:
                self.relativeEducationCost.append(relCost) # 0.2 - 5
            # Logistic-style decay: a higher relative cost lowers the income effect.
            incomeEffect = self.p['costantIncomeParam']/(math.exp(self.p['eduWageSensitivity']*relCost) + (self.p['costantIncomeParam']-1)) # Min-Max: 0 - 10
            # Target education level: the higher of the two parents' class ranks.
            targetEL = max(person.father.classRank, person.mother.classRank)
            dE = targetEL - stage
            expEdu = math.exp(self.p['eduRankSensitivity']*dE)
            educationEffect = expEdu/(expEdu+self.p['costantEduParam'])
            # Informal care duties reduce the probability of continuing to study.
            careEffect = 1/math.exp(self.p['careEducationParam']*person.socialWork)
            pStudy = incomeEffect*educationEffect*careEffect
            # Ad-hoc corrections that keep the simulated social-class shares close
            # to their target proportions (0.2 / 0.35 / 0.25 for classes 0 / 1 / 2).
            if person.classRank == 0 and self.socialClassShares[0] > 0.2:
                pStudy *= 1.0/0.9
            if person.classRank == 0 and self.socialClassShares[0] < 0.2:
                pStudy *= 0.85
            if person.classRank == 1 and self.socialClassShares[1] > 0.35:
                pStudy *= 1.0/0.8
            if person.classRank == 2 and self.socialClassShares[2] > 0.25:
                pStudy *= 1.0/0.85
            # pStudy = math.pow(incomeEffect, self.p['incEduExp'])*math.pow(educationEffect, 1-self.p['incEduExp'])
            if pStudy < 0:
                pStudy = 0
            # Check
            if self.year == self.p['getCheckVariablesAtYear']:
                self.probKeepStudying.append(pStudy)
                self.stageStudent.append(stage)
        else:
            # print('perCapitaDisposableIncome: ' + str(perCapitaDisposableIncome))
            pStudy = 0
    else:
        # Both parents dead: the person cannot keep studying.
        pStudy = 0
    # pWork = math.exp(-1*self.p['eduEduSensitivity']*dE1)
    # return (pStudy/(pStudy+pWork))
    #pStudy = 0.8
    return (pStudy)
def wagesGrowth(self):
    """Apply one year of wage growth to every wage- and price-level parameter."""
    growth = self.p['wageGrowthRate']
    # Class-specific wage levels all grow by the same annual rate.
    for rank in range(int(self.p['numberClasses'])):
        for key in ('pensionWage', 'incomeInitialLevels', 'incomeFinalLevels'):
            self.p[key][rank] *= growth
    # Education costs for the four study stages.
    for stage in range(4):
        self.p['educationCosts'][stage] *= growth
    # Care prices grow in step with wages.
    self.p['pricePublicSocialCare'] *= growth
    self.p['priceSocialCare'] *= growth
def enterWorkForce(self, person):
    """Move `person` into the labour market as an unemployed job seeker.

    Gives the person their current market wage, zeroes every income stream
    and the job history, and flags them as actively searching for a job.
    """
    person.status = 'unemployed'
    person.wage = self.marketWage(person)
    # No job yet: reset every income stream and the job tenure counter.
    for attribute in ('income', 'disposableIncome', 'netIncome',
                      'finalIncome', 'jobTenure'):
        setattr(person, attribute, 0)
    person.jobLocation = None
    person.searchJob = True
def marketWage(self, person):
    """Return the person's market wage, following a Gompertz growth law.

    The wage starts at the class-specific initial income level and converges
    towards the class-specific final level `k` at rate `r` as the person
    accumulates working time:  wage(t) = k * exp(c * exp(-r * t)) with
    c = ln(w0 / k), so wage(0) == w0 and wage(t) -> k as t grows.
    """
    # Gompertz Law
    k = self.p['incomeFinalLevels'][person.classRank]
    r = self.p['incomeGrowthRate'][person.classRank]
    # math.log instead of np.log: the operand is a plain scalar, so this is
    # consistent with the math.exp calls below and returns a native float
    # rather than a numpy scalar.
    c = math.log(self.p['incomeInitialLevels'][person.classRank]/k)
    exponent = c*math.exp(-1*r*person.workingTime)
    marketWage = k*math.exp(exponent)
    return (marketWage)
def doDivorces(self):
    """Randomly dissolve marriages and rehouse the men who split.

    Only men in a relationship (and not living with family as elderly) are
    sampled.  The split probability depends on the decade of the man's age,
    his class rank and the period (historical rate before 'thePresent',
    variable rate afterwards).
    """
    menInRelationships = [x for x in self.pop.livingPeople if x.sex == 'male' and x.partner != None and x.elderlyWithFamily == False]
    for man in menInRelationships:
        # NOTE(review): man.age/10 relies on Python 2 integer division to
        # index the per-decade modifier; under Python 3 this would need //.
        if self.year < self.p['thePresent']:
            rawSplit = self.p['basicDivorceRate'] * self.p['divorceModifierByDecade'][man.age/10]
        else:
            rawSplit = self.p['variableDivorce'] * self.p['divorceModifierByDecade'][man.age/10]
        # baseRate rescales so the class-biased rates keep the aggregate rate.
        baseRate = self.baseRate(self.p['divorceBias'], rawSplit)
        splitProb = baseRate*math.pow(self.p['divorceBias'], man.classRank)
        if random.random() < splitProb:
            man.movedThisYear = True
            wife = man.partner
            # Modification: if wife is student, she starts working
            if wife.status == 'student':
                wife.independentStatus = True
                self.enterWorkForce(wife)
            man.partner = None
            wife.partner = None
            self.divorceTally += 1
            # Log the event when the split happens in the house on display.
            if man.house == self.displayHouse:
                messageString = str(self.year) + ": #" + str(man.id) + " splits with #" + str(wife.id) + "."
                self.textUpdateList.append(messageString)
            # Modification: if children are the man's children, they go with him
            manChildren = [x for x in man.children if x.dead == False and x.house == man.house and x.father == man and x.mother != wife]
            peopleToMove = [man]
            peopleToMove += manChildren
            self.findNewHouse(peopleToMove, man.house.town, 'doDivorces')
def doMarriages(self):
    """Match single employed men with single women and marry the chosen pairs.

    Grooms are sampled from single employed men with a probability that rises
    with income; each groom then draws a bride from eligible women, weighted
    by geographic distance, class-rank distance, age gap and student status.
    """
    eligibleMen = []
    eligibleWomen = []
    for i in self.pop.livingPeople:
        if i.partner == None:
            # Men need to be employed to marry
            if i.sex == 'male' and i.status == 'employed':
                eligibleMen.append(i)
    ###### Optional: select a subset of eligible men based on age ##########################################
    potentialGrooms = []
    for m in eligibleMen:
        # Income factor in [0, 1): approaches 1 as income grows.
        incomeFactor = (math.exp(self.p['incomeMarriageParam']*m.income)-1)/math.exp(self.p['incomeMarriageParam']*m.income)
        # NOTE(review): m.age/10 relies on Python 2 integer division to index
        # the per-decade modifier; under Python 3 this would need //.
        manMarriageProb = self.p['basicMaleMarriageProb']*self.p['maleMarriageModifierByDecade'][m.age/10]*incomeFactor
        if random.random() < manMarriageProb:
            potentialGrooms.append(m)
    ###########################################################################################################
    for man in potentialGrooms: # for man in eligibleMen: #
        # maxEncounters = self.datingActivity(man)
        eligibleWomen = [x for x in self.pop.livingPeople if x.sex == 'female' and x.age >= self.p['minPregnancyAge'] and x.house != man.house and x.partner == None]
        potentialBrides = []
        for woman in eligibleWomen:
            deltaAge = man.age - woman.age
            # Age gap allowed: man at most 19 years older, at most 9 years younger.
            if deltaAge < 20 and deltaAge > -10:
                if woman.house != man.house:
                    # Exclude siblings (same mother) and parent-child pairs.
                    if man.mother != None and woman.mother != None:
                        if man.mother != woman.mother and man not in woman.children and woman not in man.children:
                            potentialBrides.append(woman)
                    else:
                        if man not in woman.children and woman not in man.children:
                            potentialBrides.append(woman)
        # if man.mother != None and woman.mother != None:
        # if man.mother != woman.mother and man not in woman.children and woman not in man.children:
        # potentialBrides.append(woman)
        # else:
        # if man not in woman.children and woman not in man.children:
        # potentialBrides.append(woman)
        if len(potentialBrides) > 0:
            manTown = man.house.town
            bridesWeights = []
            for woman in potentialBrides:
                studentFactor = 1.0
                if woman.status == 'student' or woman.status == 'outOfTownStudent':
                    studentFactor = self.p['studentFactorParam']
                womanTown = woman.house.town
                # Manhattan distance between the two towns, normalised by map size.
                geoDistance = self.manhattanDistance(manTown, womanTown)/float(self.p['mapGridXDimension'] + self.p['mapGridYDimension'])
                geoFactor = 1/math.exp(self.p['betaGeoExp']*geoDistance)
                statusDistance = float(abs(man.classRank-woman.classRank))/float((self.p['numberClasses']-1))
                # Marrying a higher-ranked woman uses the unbiased exponent;
                # otherwise the gender bias multiplier applies.
                if man.classRank < woman.classRank:
                    betaExponent = self.p['betaSocExp']
                else:
                    betaExponent = self.p['betaSocExp']*self.p['rankGenderBias']
                socFactor = 1/math.exp(betaExponent*statusDistance)
                ageFactor = self.p['deltageProb'][self.deltaAge(man.age-woman.age)]
                marriageProb = geoFactor*socFactor*ageFactor*studentFactor
                bridesWeights.append(marriageProb)
            # Draw the bride: weighted when weights are positive, uniform otherwise.
            if sum(bridesWeights) > 0:
                bridesProb = [i/sum(bridesWeights) for i in bridesWeights]
                # Empty list error check
                if len(potentialBrides) < 1:
                    print 'Error in doMarriages: the list of potential brides is empty!'
                woman = np.random.choice(potentialBrides, p = bridesProb)
            else:
                if len(potentialBrides) < 1:
                    print 'Error in doMarriages: the list of potential brides is empty!'
                woman = np.random.choice(potentialBrides)
            man.partner = woman
            man.yearMarried = self.year
            woman.partner = man
            woman.yearMarried = self.year
            man.yearsSeparated = 0
            woman.yearsSeparated = 0
            man.numberPartner += 1
            woman.numberPartner += 1
            man.justMarried = woman.id
            woman.justMarried = man.id
            # childrenWithMan = [x for x in man.children if x.dead == False and x.house == man.house and
            # x.justMarried == None]
            # for child in childrenWithMan:
            # child.justMarried = woman.id
            # childrenWithWoman = [x for x in woman.children if x.dead == False and x.house == woman.house and
            # x.justMarried == None]
            # for child in childrenWithWoman:
            # child.justMarried = man.id
            self.marriageTally += 1
            # Log the event when either newlywed lives in the house on display.
            if man.house == self.displayHouse or woman.house == self.displayHouse:
                messageString = str(self.year) + ": #" + str(man.id) + " (age " + str(man.age) + ")"
                messageString += " and #" + str(woman.id) + " (age " + str(woman.age)
                messageString += ") marry."
                self.textUpdateList.append(messageString)
def doMarriages_Bis(self):
eligibleMen = [x for x in self.pop.livingPeople if x.sex == 'male' and x.partner == None and x.status == 'employed']
eligibleWomen = [x for x in self.pop.livingPeople if x.sex == 'female' and x.partner == None and x.age >= self.p['minPregnancyAge']]
interestedWomen = []
for w in eligibleWomen:
womanMarriageProb = self.p['basicFemaleMarriageProb']*self.p['femaleMarriageModifierByDecade'][w.age/10]
if random.random() < womanMarriageProb:
interestedWomen.append(w)
###### Optional: select a subset of eligible men based on age ##########################################
potentialGrooms = []
for man in eligibleMen:
incomeFactor = (math.exp(self.p['incomeMarriageParam']*man.income)-1)/math.exp(self.p['incomeMarriageParam']*man.income)
manMarriageProb = self.p['basicMaleMarriageProb']*self.p['maleMarriageModifierByDecade'][man.age/10]*incomeFactor
manMarriageProb *= self.p['maleMarriageMultiplier']
if random.random() < manMarriageProb:
potentialBrides = []
for woman in interestedWomen:
deltaAge = man.age - woman.age
if deltaAge < 20 and deltaAge > -10:
if woman.house != man.house:
if man.mother != None and woman.mother != None:
if man.mother != woman.mother and man not in woman.children and woman not in man.children:
potentialBrides.append(woman)
else:
if man not in woman.children and woman not in man.children:
potentialBrides.append(woman)
if len(potentialBrides) > 0:
manTown = man.house.town
bridesWeights = []
for woman in potentialBrides:
studentFactor = 1.0
if woman.status == 'student' or woman.status == 'outOfTownStudent':
studentFactor = self.p['studentFactorParam']
womanTown = woman.house.town
geoDistance = self.manhattanDistance(manTown, womanTown)/float(self.p['mapGridXDimension'] + self.p['mapGridYDimension'])
geoFactor = 1/math.exp(self.p['betaGeoExp']*geoDistance)
statusDistance = float(abs(man.classRank-woman.classRank))/float((self.p['numberClasses']-1))
if man.classRank < woman.classRank:
betaExponent = self.p['betaSocExp']
else:
betaExponent = self.p['betaSocExp']*self.p['rankGenderBias']
socFactor = 1/math.exp(betaExponent*statusDistance)
ageFactor = self.p['deltageProb'][self.deltaAge(man.age-woman.age)]
marriageProb = geoFactor*socFactor*ageFactor*studentFactor
bridesWeights.append(marriageProb)
if sum(bridesWeights) > 0:
bridesProb = [i/sum(bridesWeights) for i in bridesWeights]
if len(potentialBrides) < 1:
print 'Error in doMarriages: the list of potential brides is empty!'
woman = np.random.choice(potentialBrides, p = bridesProb)
else:
if len(potentialBrides) < 1:
print 'Error in doMarriages: the list of potential brides is empty!'
woman = np.random.choice(potentialBrides)
man.partner = woman
man.yearMarried = self.year
woman.partner = man
woman.yearMarried = self.year
man.yearsSeparated = 0
woman.yearsSeparated = 0
man.numberPartner += 1
woman.numberPartner += 1
man.justMarried = woman.id
woman.justMarried = man.id
self.marriageTally += 1
if man.house == self.displayHouse | |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, TypeVar, TYPE_CHECKING
)
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator import distributed_trace
from ._shared.response_handlers import return_response_headers, process_storage_error
from ._serialize import get_modify_conditions
if TYPE_CHECKING:
from datetime import datetime
BlobClient = TypeVar("BlobClient")
ContainerClient = TypeVar("ContainerClient")
class BlobLeaseClient(object):
"""Creates a new BlobLeaseClient.
This client provides lease operations on a BlobClient or ContainerClient.
:ivar str id:
The ID of the lease currently being maintained. This will be `None` if no
lease has yet been acquired.
:ivar str etag:
The ETag of the lease currently being maintained. This will be `None` if no
lease has yet been acquired or modified.
:ivar ~datetime.datetime last_modified:
The last modified timestamp of the lease currently being maintained.
This will be `None` if no lease has yet been acquired or modified.
:param client:
The client of the blob or container to lease.
:type client: ~azure.storage.blob.BlobClient or
~azure.storage.blob.ContainerClient
:param str lease_id:
A string representing the lease ID of an existing lease. This value does not
need to be specified in order to acquire a new lease, or break one.
"""
def __init__(
        self, client, lease_id=None
):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
    # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None
    """Initialize the lease client, proposing a fresh GUID when no lease ID is given."""
    # Reuse a truthy caller-supplied lease ID; otherwise propose a new GUID.
    if lease_id:
        self.id = lease_id
    else:
        self.id = str(uuid.uuid4())
    self.last_modified = None
    self.etag = None
    # Select the generated sub-client that matches the wrapped resource type.
    if hasattr(client, 'blob_name'):
        self._client = client._client.blob  # type: ignore # pylint: disable=protected-access
        return
    if hasattr(client, 'container_name'):
        self._client = client._client.container  # type: ignore # pylint: disable=protected-access
        return
    raise TypeError("Lease must use either BlobClient or ContainerClient.")
def __enter__(self):
    """Support use as a context manager; the lease client itself is returned."""
    return self
def __exit__(self, *args):
    """Release the lease when leaving the ``with`` block."""
    self.release()
@distributed_trace
def acquire(self, lease_duration=-1, **kwargs):
    # type: (int, **Any) -> None
    """Request a new lease on the wrapped blob or container.

    If the resource has no active lease, the Blob service creates one and
    returns its ID; the ID, ETag and last-modified time are stored on this
    client.

    :param int lease_duration:
        Duration of the lease in seconds: 15 to 60 for a finite lease, or
        negative one (-1, the default) for a lease that never expires.  The
        duration cannot be changed later with renew or change.
    :keyword ~datetime.datetime if_modified_since:
        A UTC DateTime value (non-UTC datetimes are converted to UTC; naive
        datetimes are assumed UTC).  Perform the operation only if the
        resource has been modified since the specified time.
    :keyword ~datetime.datetime if_unmodified_since:
        A UTC DateTime value (non-UTC datetimes are converted to UTC; naive
        datetimes are assumed UTC).  Perform the operation only if the
        resource has not been modified since the specified date/time.
    :keyword str etag:
        An ETag value, or the wildcard character (*), checked against the
        condition given by the `match_condition` parameter.
    :keyword ~azure.core.MatchConditions match_condition:
        The match condition to use upon the etag.
    :keyword str if_tags_match_condition:
        Specify a SQL where clause on blob tags to operate only on blob with a matching value.
        eg. ``\"\\\"tagname\\\"='my tag'\"``
        .. versionadded:: 12.4.0
    :keyword int timeout:
        The timeout parameter is expressed in seconds.
    :rtype: None
    """
    conditions = get_modify_conditions(kwargs)
    try:
        headers = self._client.acquire_lease(
            proposed_lease_id=self.id,
            duration=lease_duration,
            timeout=kwargs.pop('timeout', None),
            modified_access_conditions=conditions,
            cls=return_response_headers,
            **kwargs)
    except HttpResponseError as error:
        # Translate and re-raise as the appropriate storage exception.
        process_storage_error(error)
    # Mirror the service's view of the lease on this client instance.
    self.id = headers.get('lease_id')  # type: str
    self.etag = headers.get('etag')  # type: str
    self.last_modified = headers.get('last_modified')  # type: datetime
@distributed_trace
def renew(self, **kwargs):
    # type: (Any) -> None
    """Renew the lease, resetting its duration clock.

    Renewal succeeds when this client's lease ID matches the one associated
    with the container or blob.  An expired lease may still be renewed as
    long as the resource has not been leased again since the expiration.

    :keyword ~datetime.datetime if_modified_since:
        A UTC DateTime value (non-UTC datetimes are converted to UTC; naive
        datetimes are assumed UTC).  Perform the operation only if the
        resource has been modified since the specified time.
    :keyword ~datetime.datetime if_unmodified_since:
        A UTC DateTime value (non-UTC datetimes are converted to UTC; naive
        datetimes are assumed UTC).  Perform the operation only if the
        resource has not been modified since the specified date/time.
    :keyword str etag:
        An ETag value, or the wildcard character (*), checked against the
        condition given by the `match_condition` parameter.
    :keyword ~azure.core.MatchConditions match_condition:
        The match condition to use upon the etag.
    :keyword str if_tags_match_condition:
        Specify a SQL where clause on blob tags to operate only on blob with a matching value.
        eg. ``\"\\\"tagname\\\"='my tag'\"``
        .. versionadded:: 12.4.0
    :keyword int timeout:
        The timeout parameter is expressed in seconds.
    :return: None
    """
    conditions = get_modify_conditions(kwargs)
    try:
        headers = self._client.renew_lease(
            lease_id=self.id,
            timeout=kwargs.pop('timeout', None),
            modified_access_conditions=conditions,
            cls=return_response_headers,
            **kwargs)
    except HttpResponseError as error:
        # Translate and re-raise as the appropriate storage exception.
        process_storage_error(error)
    # Mirror the service's view of the lease on this client instance.
    self.id = headers.get('lease_id')  # type: str
    self.etag = headers.get('etag')  # type: str
    self.last_modified = headers.get('last_modified')  # type: datetime
@distributed_trace
def release(self, **kwargs):
    # type: (Any) -> None
    """Release the lease.

    The lease is released when this client's lease ID matches the one
    associated with the container or blob; another client may then acquire
    a lease on the resource as soon as the release completes.

    :keyword ~datetime.datetime if_modified_since:
        A UTC DateTime value (non-UTC datetimes are converted to UTC; naive
        datetimes are assumed UTC).  Perform the operation only if the
        resource has been modified since the specified time.
    :keyword ~datetime.datetime if_unmodified_since:
        A UTC DateTime value (non-UTC datetimes are converted to UTC; naive
        datetimes are assumed UTC).  Perform the operation only if the
        resource has not been modified since the specified date/time.
    :keyword str etag:
        An ETag value, or the wildcard character (*), checked against the
        condition given by the `match_condition` parameter.
    :keyword ~azure.core.MatchConditions match_condition:
        The match condition to use upon the etag.
    :keyword str if_tags_match_condition:
        Specify a SQL where clause on blob tags to operate only on blob with a matching value.
        eg. ``\"\\\"tagname\\\"='my tag'\"``
        .. versionadded:: 12.4.0
    :keyword int timeout:
        The timeout parameter is expressed in seconds.
    :return: None
    """
    conditions = get_modify_conditions(kwargs)
    try:
        headers = self._client.release_lease(
            lease_id=self.id,
            timeout=kwargs.pop('timeout', None),
            modified_access_conditions=conditions,
            cls=return_response_headers,
            **kwargs)
    except HttpResponseError as error:
        # Translate and re-raise as the appropriate storage exception.
        process_storage_error(error)
    # Mirror the service's view of the (now released) lease on this client.
    self.id = headers.get('lease_id')  # type: str
    self.etag = headers.get('etag')  # type: str
    self.last_modified = headers.get('last_modified')  # type: datetime
@distributed_trace
def change(self, proposed_lease_id, **kwargs):
# type: (str, Any) -> None
"""Change the lease ID of an active lease.
:param str proposed_lease_id:
Proposed lease ID, in a GUID string format. The Blob service returns 400
(Invalid request) if the proposed lease ID is not in the correct format.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it | |
"""Micro moments."""
import abc
import collections
from typing import Any, Dict, Hashable, List, Optional, Sequence, TYPE_CHECKING
import numpy as np
from . import options
from .utilities.basics import Array, StringRepresentation, format_number, format_table
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from .economies.economy import Economy # noqa
class Moment(StringRepresentation):
    """Information about a single micro moment.

    Stores the scalar moment value and, optionally, the distinct market IDs
    over which the moment is computed and averaged.
    """

    value: Array
    market_ids: Optional[Array]

    def __init__(self, value: float, market_ids: Optional[Sequence] = None) -> None:
        """Validate information about the moment to the greatest extent possible without an economy instance."""
        self.value = np.asarray(value, options.dtype)
        if self.value.size != 1:
            raise ValueError("The micro moment value must be a scalar.")
        self.market_ids = None
        if market_ids is not None:
            # Use the builtin object as the dtype: the np.object alias was
            # deprecated in NumPy 1.20 and removed in NumPy 1.24.
            self.market_ids = np.asarray(market_ids, object)
            unique, counts = np.unique(self.market_ids, return_counts=True)
            duplicates = unique[counts > 1]
            if duplicates.size > 0:
                raise ValueError(f"The following market IDs are duplicated in market_ids: {duplicates}.")

    def __str__(self) -> str:
        """Format information about the micro moment as a string."""
        return f"{self._format_markets()}: {self._format_moment()}"

    def _format_markets(self) -> str:
        """Format information about the markets associated with the micro moment as a string."""
        if self.market_ids is None:
            return "All"
        if len(self.market_ids) == 1:
            return self.market_ids[0]
        return ", ".join(str(t) for t in self.market_ids)

    def _format_moment(self) -> str:
        """Construct a string expression for the micro moment."""
        formatted = self._format_value()
        # The moment is "estimand minus value", so flip the value's sign when
        # appending it to the expression (format_number always emits a sign,
        # which the [1:] slice strips).
        if self.value < 0:
            formatted = f"{formatted} + {format_number(float(self.value))[1:]}"
        elif self.value > 0:
            formatted = f"{formatted} - {format_number(float(self.value))[1:]}"
        return formatted

    @abc.abstractmethod
    def _format_value(self) -> str:
        """Construct a string expression for the micro moment value."""

    def _validate(self, economy: 'Economy') -> None:
        """Check that all market IDs associated with this moment are in the economy."""
        if self.market_ids is not None:
            extra_ids = set(self.market_ids) - set(economy.unique_market_ids)
            if extra_ids:
                raise ValueError(f"market_ids contains the following extra IDs: {sorted(extra_ids)}.")
class DemographicExpectationMoment(Moment):
    r"""Configuration for micro moments that match expectations of demographics for agents who choose certain products.
    For example, micro data can sometimes be used to compute the mean of a demographic such as income, :math:`y_{it}`,
    for agents who choose product :math:`j`. With the value :math:`v_m` of this mean, a micro moment :math:`m` in market
    :math:`t` for agent :math:`i` can be defined by
    .. math:: g_{M,imt} = \frac{y_{it} s_{ijt}}{s_{jt}} - v_m.
    Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which
    the micro data used to compute :math:`v_m` are relevant, which gives :math:`\bar{g}_{M,m}` in
    :eq:`averaged_micro_moments`.
    Parameters
    ----------
    product_id : `object`
        ID of the product :math:`j` or ``None`` to denote the outside option :math:`j = 0`. If not ``None``, there must
        be exactly one of this ID in the ``product_ids`` field of ``product_data`` in :class:`Problem` or
        :class:`Simulation` for each market over which this micro moment will be averaged.
    demographics_index : `int`
        Column index of the demographic :math:`y_{it}` (which can be any demographic, not just income) in the matrix of
        agent demographics, :math:`d`. This should be between zero and :math:`D - 1`, inclusive.
    value : `float`
        Value :math:`v_m` of the mean estimated from micro data.
    market_ids : `array-like, optional`
        Distinct market IDs over which the micro moments will be averaged to get :math:`\bar{g}_{M,m}`. These are also
        the only markets in which the moments will be computed. By default, the moments are computed for and averaged
        across all markets. That is, by default, it is assumed that the specified ``value`` is relevant for and on
        average the same for all markets.
    Examples
    --------
    - :doc:`Tutorial </tutorial>`
    """

    product_id: Optional[Any]
    demographics_index: int

    def __init__(
            self, product_id: Optional[Any], demographics_index: int, value: float,
            market_ids: Optional[Sequence] = None) -> None:
        """Validate information about the moment to the greatest extent possible without an economy instance."""
        super().__init__(value, market_ids)
        if not isinstance(demographics_index, int) or demographics_index < 0:
            # Zero is a valid column index, so the requirement is non-negative
            # (the original message incorrectly said "positive").
            raise ValueError("demographics_index must be a non-negative int.")
        self.product_id = product_id
        self.demographics_index = demographics_index

    def _format_value(self) -> str:
        """Construct a string expression for the expectation moment value."""
        product = "Outside" if self.product_id is None else f"'{self.product_id}'"
        return f"E[Demographic Column {self.demographics_index} | {product}]"

    def _validate(self, economy: 'Economy') -> None:
        """Check that matrix indices are valid in the economy."""
        super()._validate(economy)
        economy._validate_product_id(self.product_id, self.market_ids)
        if self.demographics_index >= economy.D:
            # Valid column indices run from 0 to D - 1 (the original message
            # incorrectly described the range as "0 to D, inclusive").
            raise ValueError(f"demographics_index must be between 0 and D - 1 = {economy.D - 1}, inclusive.")
class DemographicCovarianceMoment(Moment):
    r"""Configuration for micro moments that match covariances between product characteristics and demographics.

    For example, micro data can sometimes be used to compute the sample covariance between a product characteristic
    :math:`x_{jt}` of an agent's choice :math:`j`, and a demographic such as income, :math:`y_{it}`, amongst those
    agents who purchase an inside good. With the value :math:`v_m` of this sample covariance, a micro moment :math:`m`
    in market :math:`t` for agent :math:`i` can be defined by

    .. math:: g_{M,imt} = (z_{it} - \bar{z}_t)(y_{it} - \bar{y}_t) - v_m

    where :math:`\bar{z}_t = \sum_i w_{it} z_{it}`, :math:`\bar{y}_t = \sum_i w_{it} y_{it}`, and conditional on
    choosing an inside good, the expected value of :math:`x_{jt}` for agent :math:`i` is

    .. math:: z_{it} = \sum_{j \in J_t} x_{jt}s_{ij(-0)t}

    where :math:`s_{ij(-0)t} = s_{ijt} / (1 - s_{i0t})` is the probability of :math:`i` choosing :math:`j` when the
    outside option is removed from the choice set.

    Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which
    the micro data used to compute :math:`v_m` are relevant, which gives :math:`\bar{g}_{M,m}` in
    :eq:`averaged_micro_moments`.

    Parameters
    ----------
    X2_index : `int`
        Column index of :math:`x_{jt}` in the matrix of demand-side nonlinear product characteristics, :math:`X_2`. This
        should be between zero and :math:`K_2 - 1`, inclusive.
    demographics_index : `int`
        Column index of the demographic :math:`y_{it}` (which can be any demographic, not just income) in the matrix of
        agent demographics, :math:`d`. This should be between zero and :math:`D - 1`, inclusive.
    value : `float`
        Value :math:`v_m` of the sample covariance estimated from micro data.
    market_ids : `array-like, optional`
        Distinct market IDs over which the micro moments will be averaged to get :math:`\bar{g}_{M,m}`. These are also
        the only markets in which the moments will be computed. By default, the moments are computed for and averaged
        across all markets. That is, by default, it is assumed that the specified ``value`` is relevant for and on
        average the same for all markets.

    Examples
    --------
        - :doc:`Tutorial </tutorial>`

    """

    X2_index: int
    demographics_index: int

    def __init__(
            self, X2_index: int, demographics_index: int, value: float, market_ids: Optional[Sequence] = None) -> None:
        """Validate information about the moment to the greatest extent possible without an economy instance."""
        super().__init__(value, market_ids)
        if not isinstance(X2_index, int) or X2_index < 0:
            # Zero is a valid column index, so the requirement is nonnegative (not strictly positive).
            raise ValueError("X2_index must be a nonnegative int.")
        if not isinstance(demographics_index, int) or demographics_index < 0:
            raise ValueError("demographics_index must be a nonnegative int.")
        self.X2_index = X2_index
        self.demographics_index = demographics_index

    def _format_value(self) -> str:
        """Construct a string expression for the covariance moment."""
        return f"Cov(X2 Column {self.X2_index}, Demographic Column {self.demographics_index} | Inside)"

    def _validate(self, economy: 'Economy') -> None:
        """Check that matrix indices are valid in the economy."""
        super()._validate(economy)
        if self.X2_index >= economy.K2:
            # Valid indices are 0, ..., K2 - 1; the old message overstated the upper bound by one.
            raise ValueError(f"X2_index must be between 0 and K2 - 1 = {economy.K2 - 1}, inclusive.")
        if self.demographics_index >= economy.D:
            raise ValueError(f"demographics_index must be between 0 and D - 1 = {economy.D - 1}, inclusive.")
class DiversionProbabilityMoment(Moment):
r"""Configuration for micro moments that match second choice probabilities of certain products for agents whose
first choices are certain other products.
For example, micro data can sometimes be used to compute the share of agents who would choose product :math:`k` if
:math:`j` were removed from the choice set, out of those agents whose first choice is :math:`j`. With the value
:math:`v_m` of this share, a micro moment :math:`m` in market :math:`t` for agent :math:`i` can be defined by
.. math:: g_{M,imt} = \frac{s_{ik(-j)t} s_{ijt}}{s_{jt}} - v_m
where :math:`s_{ik(-j)t} = s_{ijt} / (1 - s_{ijt})` is the probability of :math:`i` choosing :math:`k` when
:math:`j` is removed from the choice set. Rearranging terms gives the equivalent definition
.. math:: g_{M,imt} = \frac{s_{ik(-j)t} - s_{ikt}}{s_{jt}} - v_m,
which is more reminiscent of the long-run diversion ratios :math:`\bar{\mathscr{D}}_{jk}` computed by
:meth:`ProblemResults.compute_long_run_diversion_ratios`.
Integrals of these micro moments are approximated within and averaged across a set :math:`T_m` of markets in which
the micro data used to compute :math:`v_m` | |
# src/UQpy/UQpyModules.py (from repository bsaakash/new_repo)
import os
import shutil
import UQpy as uq
import numpy as np
import sys
class RunCommandLine:
    """Drive a complete UQpy run from parsed command-line arguments.

    Reads 'UQpy_Params.txt' from the model directory, then dispatches to the
    reliability pipeline ('SuS') or a brute-force sampling pipeline
    ('mcs', 'lhs', 'mcmc', 'pss', 'sts').
    """

    def __init__(self, argparseobj):
        # :param argparseobj: argparse.Namespace carrying the command-line
        #     options; attributes read here and in the run_* methods include
        #     Model_directory, Solver, CPUs, Input_Shell_Script,
        #     Output_Shell_Script, WorkingDir and Output_directory.
        os.system('clear')
        self.args = argparseobj
        ################################################################################################################
        # Read UQpy parameter file
        # NOTE: chdir is process-wide; every relative path below depends on it.
        os.chdir(os.path.join(os.getcwd(), self.args.Model_directory))

        if not os.path.isfile('UQpy_Params.txt'):
            print("Error: UQpy parameters file does not exist")
            sys.exit()
        else:
            from src.UQpy.ReadInputFile import readfile
            data = readfile('UQpy_Params.txt')

        ################################################################################################################
        # Run UQpy
        print("\nExecuting UQpy...\n")

        ################################################################################################################
        # Run Selected method
        if data['method'] in ['SuS']:
            self.run_reliability(data)
        elif data['method'] in ['mcs', 'lhs', 'mcmc', 'pss', 'sts']:
            self.run_uq(data)

    def run_uq(self, data):
        """Run a brute-force (non-adaptive) sampling method and, if a solver is given, the model."""
        # Steps:
        # Initialize the sampling method (Check if UQpy_Params.txt contains all the necessary information)
        # Actually run the selected sampling method in U(0, 1) and transform to the original space
        # Save the samples in a .txt (or .csv) file
        # If a solver (black box model) is provided then:
        # If parallel processing is selected: Split the samples into chunks
        # Run the model
        # Save the model evaluations

        # Brute-force sampling methods. Set the adaptivity flag False
        self.args.Adaptive = False

        ################################################################################################################
        # Initialize the requested UQpy method: Check if all necessary parameters are defined in the UQpyParams.txt file
        from src.UQpy.SampleMethods import init_sm, run_sm
        init_sm(data)

        ################################################################################################################
        # Run the requested UQpy method
        rvs = run_sm(data)

        # Save the samples in a .txt file
        np.savetxt('UQpy_Samples.txt', rvs.samples, fmt='%0.5f')

        # Save the samples in a .csv file
        if 'names of parameters' not in data:
            # No parameter names in the input file: fill with '#name' placeholders.
            import itertools
            data['names of parameters'] = list(itertools.repeat('#name', rvs.samples.shape[1]))
        save_csv(data['names of parameters'], rvs.samples)

        ################################################################################################################
        # If a model is provided then run it
        if self.args.Solver is not None:
            # RunModel's __init__ performs the whole evaluation as a side effect.
            RunModel(self.args.CPUs, self.args.Solver, self.args.Input_Shell_Script, self.args.Output_Shell_Script,
                     self.args.Adaptive, rvs.dimension)

        ################################################################################################################
        print("\nSuccessful execution of UQpy\n\n")

    def run_reliability(self, data):
        """Run a reliability method (currently only Subset Simulation) and collect its outputs."""
        # NOTE(review): inconsistent package roots — run_uq imports from
        # 'src.UQpy' while this imports from 'lib.UQpy'; confirm which is valid.
        from lib.UQpy.Reliability import init_rm, run_rm
        init_rm(data)
        if data['method'] == 'SuS':
            from UQpy.Reliability import SubsetSimulation  # NOTE(review): imported but never used here
            self.args.CPUs_flag = True
            self.args.ParallelProcessing = False
            self.args.Adaptive = True
            sus = run_rm(self, data)

            # Save the samples in a .txt file
            np.savetxt('UQpy_Samples.txt', sus.samples, fmt='%0.5f')

            # Save the samples in a .csv file
            save_csv(data['Names of random variables'], sus.samples)

            # Save the probability of failure in a .txt file
            print(sus.pf)
            with open('PF.txt', 'wb') as f:
                np.savetxt(f, [sus.pf], fmt='%0.6f')
                np.savetxt(f, [sus.cov], fmt='%0.6f')

        ################################################################################################################
        # Move the data to directory simUQpyOut/ , delete the temp/ directory
        # and terminate the program
        _files = list()
        _files.append('UQpy_Samples.csv')
        _files.append('UQpy_Samples.txt')
        _files.append('PF.txt')

        for file_name in _files:
            full_file_name = os.path.join(self.args.WorkingDir, file_name)
            shutil.copy(full_file_name, self.args.Output_directory)

        shutil.rmtree(self.args.WorkingDir)
        shutil.move(self.args.Output_directory, self.args.Model_directory)

        ################################################################################################################
        print("\nSuccessful execution of UQpy\n\n")
class RunModel:
    """
    Run the computational model over previously generated samples.

    Copies the model files into a temporary 'tmp/' working directory, evaluates
    the model (serially or in parallel) on each sample row read from
    'UQpy_Samples.txt', and collects the outputs as 'Model_{i}.txt' files in
    'simUQpyOut/'. All of the work happens in ``__init__``.

    :param cpu: number of CPUs to use; 0 or None selects serial execution
    :param solver: shell script (*.sh) that runs the model for one sample
    :param input_: shell script (*.sh) that builds the model input for one sample
    :param output_: shell script (*.sh) that extracts the model output for one sample
    :param adaptive: True when a single sample is processed at a time (adaptive methods)
    :param dimension: number of random variables (columns of the sample file)
    """
    def __init__(self, cpu=None, solver=None, input_=None, output_=None, adaptive=None, dimension=None):
        self.CPUs = cpu
        self.model_script = solver
        self.input_script = input_
        self.output_script = output_
        self.Adaptive = adaptive
        self.dimension = dimension
        import shutil
        current_dir = os.getcwd()

        ################################################################################################################
        # Create a unique temporary directory. Remove after completion.
        folder_name = 'simUQpyOut'
        output_directory = os.path.join(os.sep, current_dir, folder_name)

        # Collect every regular file in the current directory (model scripts, sample files).
        model_files = list()
        for fname in os.listdir(current_dir):
            path = os.path.join(current_dir, fname)
            if not os.path.isdir(path):
                model_files.append(path)

        # Start from a clean 'tmp' working directory.
        dir_path = os.path.join(current_dir, 'tmp')
        if os.path.exists(dir_path) and os.path.isdir(dir_path):
            shutil.rmtree(dir_path)
        # exist_ok=False: creation fails loudly if 'tmp' reappears concurrently.
        os.makedirs('tmp', exist_ok=False)
        work_dir = os.path.join(os.sep, current_dir, 'tmp')

        # copy UQ_samples.txt to working-directory
        for file_name in model_files:
            full_file_name = os.path.join(current_dir, file_name)
            shutil.copy(full_file_name, work_dir)

        os.chdir(os.path.join(current_dir, work_dir))

        # Decide between serial and parallel execution, capping CPUs at the machine's count.
        if self.CPUs != 0 and self.CPUs is not None:
            parallel_processing = True
            import multiprocessing
            n_cpu = multiprocessing.cpu_count()
            if self.CPUs > n_cpu:
                print("Error: You have available {0:1d} CPUs. Start parallel computing using {0:1d} CPUs".format(n_cpu))
                self.CPUs = n_cpu
        else:
            parallel_processing = False

        print("\nEvaluating the model...\n")
        import time
        start_time = time.time()
        if parallel_processing is True:
            self.values = self.multi_core()
        else:
            self.values = self.run_model()
        end_time = time.time()
        print('Total time:', end_time - start_time, "(sec)")

        ################################################################################################################
        # Move the data to directory simUQpyOut
        os.makedirs(output_directory, exist_ok=True)

        path = os.path.join(current_dir, work_dir)
        src_files = [filename for filename in os.listdir(path) if filename.startswith("Model_")]
        for file_name in src_files:
            full_file_name = os.path.join(path, file_name)
            shutil.copy(full_file_name, output_directory)

        ################################################################################################################
        # Delete the tmp working directory directory
        shutil.rmtree(work_dir)
        os.chdir(current_dir)

    def run_model(self):
        """Serially evaluate the model for every row of 'UQpy_Samples.txt'; return the list of outputs."""
        # Load the UQpyOut.txt
        values = np.loadtxt('UQpy_Samples.txt', dtype=np.float32)
        if self.Adaptive is True:
            # Adaptive runs feed one sample at a time: treat the vector as a single row.
            values = values.reshape(1, values.shape[0])
        if self.dimension == 1:
            # One random variable: make sure values is a column (n, 1) array.
            values = values.reshape(values.shape[0], self.dimension)
        model_eval = list()
        for i in range(values.shape[0]):
            # Write each value of UQpyOut.txt into a *.txt file
            with open('UQpy_run_{0}.txt'.format(i), 'wb') as f:
                np.savetxt(f, values[i, :], fmt='%0.5f')

            # Run the Input_Shell_Script.sh in order to create the input file for the model
            if self.input_script.lower().endswith('.sh'):
                join_input_script = './{0} {1}'.format(self.input_script, i)
                os.system(join_input_script)
            else:
                print('Unrecognized type of Input file')
                sys.exit()

            # Run the Model.sh in order to run the model
            if self.model_script.lower().endswith('.sh'):
                join_model_script = './{0} {1}'.format(self.model_script, i)
                os.system(join_model_script)
            else:
                print('Unrecognized type of model file')
                sys.exit()

            # Run the Output_Shell_Script.sh in order to create the input file of the model for UQpy
            if self.output_script.lower().endswith('.sh'):
                join_output_script = './{0} {1}'.format(self.output_script, i)
                os.system(join_output_script)
            else:
                print('Unrecognized type of Input file')
                sys.exit()

            # Collect the evaluation and rename it so it survives the tmp-directory sweep.
            model_eval.append(np.loadtxt('UQpy_eval_{}.txt'.format(i)))
            src_files = 'UQpy_eval_{0}.txt'.format(int(i))
            file_new = src_files.replace("UQpy_eval_{0}.txt".format(int(i)), "Model_{0}.txt".format(int(i)))
            os.rename(src_files, file_new)
        return model_eval

    def run_parallel_model(self, args, multi=False, queue=0):
        """Evaluate the model for one pre-chunked batch (worker body for multi_core).

        :param args: batch number j; reads 'UQpy_Batch_{j+1}.txt' and its index file
        :param multi: when True, also push the results onto *queue*
        :param queue: multiprocessing.Queue used to return results to the parent
        """
        import os
        from multiprocessing import Lock
        j = args

        # Define the executable shell scripts for the model

        # Load the UQpyOut.txt
        values = np.loadtxt('UQpy_Batch_{0}.txt'.format(j+1), dtype=np.float32)
        index_temp = np.loadtxt('UQpy_Batch_index_{0}.txt'.format(j + 1))
        # np.loadtxt returns a scalar for a one-line file: normalize to a list of indices.
        index = list()
        for i in range(index_temp.size):
            if index_temp.size == 1:
                index.append(index_temp)
            else:
                index.append(index_temp[i])

        # Normalize values to a 2D (n_samples, dimension) array.
        if values.size == 1:
            values = values.reshape(1, 1)
        if len(values.shape) == 1 and self.dimension != 1:
            values = values.reshape(1, values.shape[0])
        elif len(values.shape) == 1 and self.dimension == 1:
            values = values.reshape(values.shape[0], 1)

        # The batch files are consumed: remove them so reruns cannot reuse stale data.
        os.remove('UQpy_Batch_{0}.txt'.format(j+1))
        os.remove('UQpy_Batch_index_{0}.txt'.format(j + 1))

        model_eval = list()
        count = 0
        for i in index:
            # NOTE(review): the Lock is created fresh on every iteration, so this
            # acquire/release pair never contends with anything and provides no
            # mutual exclusion across processes — confirm whether it can be removed.
            lock = Lock()
            lock.acquire()  # will block if lock is already held

            # Write each value of UQpyOut.txt into a *.txt file
            np.savetxt('UQpy_run_{0}.txt'.format(int(i)), values[count, :], newline=' ', delimiter=',', fmt='%0.5f')

            # Run the Input_Shell_Script.sh in order to create the input file for the model
            if self.input_script.lower().endswith('.sh'):
                join_input_script = './{0} {1}'.format(self.input_script, int(i))
                os.system(join_input_script)
            else:
                print('Unrecognized type of Input file')
                sys.exit()

            # Run the Model.sh in order to run the model
            if self.model_script.lower().endswith('.sh'):
                join_model_script = './{0} {1}'.format(self.model_script, int(i))
                os.system(join_model_script)
            else:
                print('Unrecognized type of model file')
                sys.exit()

            # Run the Output_Shell_Script.sh in order to create the input file of the model for UQpy
            if self.output_script.lower().endswith('.sh'):
                join_output_script = './{0} {1}'.format(self.output_script, int(i))
                os.system(join_output_script)
            else:
                print('Unrecognized type of Input file')
                sys.exit()

            # Collect the evaluation and rename it with the global sample index.
            model_eval.append(np.loadtxt('UQpy_eval_{0}.txt'.format(int(i))))
            src_files = 'UQpy_eval_{0}.txt'.format(int(i))
            file_new = src_files.replace("UQpy_eval_{0}.txt".format(int(i)), "Model_{0}.txt".format(int(i)))
            os.rename(src_files, file_new)
            count = count + 1
            lock.release()

        if multi:
            queue.put(model_eval)
        return model_eval

    def multi_core(self):
        """Split the samples across CPUs, run one worker process per chunk, and gather the results."""
        from multiprocessing import Process
        from multiprocessing import Queue

        samples = np.loadtxt('UQpy_Samples.txt', dtype=np.float32)
        # Never spawn more workers than there are samples.
        if samples.shape[0] <= self.CPUs:
            self.CPUs = samples.shape[0]
            print('The number of CPUs used is\n %', samples.shape[0])
        if len(samples.shape) == 1:
            samples = samples.reshape(samples.shape[0], 1)

        # Write per-CPU batch files ('UQpy_Batch_*.txt') for the workers to consume.
        chunk_samples_cores(samples, self)

        results = []
        queues = [Queue() for i in range(self.CPUs)]
        args = [(i, True, queues[i]) for i in range(self.CPUs)]
        jobs = [Process(target=self.run_parallel_model, args=a) for a in args]
        for j in jobs:
            j.start()
        # Drain the queues before joining to avoid blocking on full pipe buffers.
        for q in queues:
            results.append(q.get())
        for j in jobs:
            j.join()
        return results
def chunk_samples_cores(samples, args):
    """Split ``samples`` row-wise into one batch file per CPU for parallel evaluation.

    Writes 'UQpy_Batch_{k}.txt' (sample rows) and 'UQpy_Batch_index_{k}.txt'
    (the matching global row indices) for k = 1..args.CPUs into the current
    working directory.

    :param samples: 2D array whose rows are sample points
    :param args: object exposing ``CPUs`` (int) and ``Adaptive`` (bool)
    """
    # In case of parallel computing divide the samples into chunks in order to send to each processor
    chunks = args.CPUs
    if args.Adaptive is True:
        # Adaptive mode: exactly one sample per processor. The previous code
        # sliced samples[i-1:i], so for i == 0 it wrapped around to the *last*
        # row and every batch was offset from its saved index; [i:i+1] keeps
        # the stored row and the stored index in sync.
        for i in range(args.CPUs):
            np.savetxt('UQpy_Batch_{0}.txt'.format(i+1), samples[i:i+1, :], fmt='%0.5f')
            np.savetxt('UQpy_Batch_index_{0}.txt'.format(i+1), np.array(i).reshape(1,))
    else:
        # Distribute rows as evenly as possible: start from ceil(n / chunks)
        # per chunk and shrink the first `dif` chunks by one so sizes sum to n.
        size = np.array([np.ceil(samples.shape[0]/chunks) for i in range(args.CPUs)]).astype(int)
        dif = np.sum(size) - samples.shape[0]
        count = 0
        for k in range(dif):
            size[count] = size[count] - 1
            count = count + 1
        for i in range(args.CPUs):
            if i == 0:
                lines = range(size[i])
            else:
                lines = range(int(np.sum(size[:i])), int(np.sum(size[:i+1])))
            np.savetxt('UQpy_Batch_{0}.txt'.format(i+1), samples[lines, :], fmt='%0.5f')
            np.savetxt('UQpy_Batch_index_{0}.txt'.format(i+1), lines)
def chunk_samples_nodes(samples, args):
    """Split ``samples`` row-wise into one batch file per cluster node.

    Mirrors :func:`chunk_samples_cores` but chunks over ``args.nodes``. Writes
    'UQpy_Batch_{k}.txt' and 'UQpy_Batch_index_{k}.txt' for k = 1..args.nodes
    into the current working directory.

    :param samples: 2D array whose rows are sample points
    :param args: object exposing ``nodes`` (int)
    """
    # In case of cluster divide the samples into chunks in order to send to each processor
    chunks = args.nodes
    # The original comprehension was missing its `for i` target
    # (`[np.ceil(...) in range(args.nodes)]`), which evaluated to a single
    # boolean instead of one chunk size per node; restore the intended sizes.
    size = np.array([np.ceil(samples.shape[0]/chunks) for i in range(args.nodes)]).astype(int)
    dif = np.sum(size) - samples.shape[0]
    count = 0
    for k in range(dif):
        size[count] = size[count] - 1
        count = count + 1
    for i in range(args.nodes):
        if i == 0:
            lines = range(0, size[i])
        else:
            lines = range(int(np.sum(size[:i])), int(np.sum(size[:i+1])))
        np.savetxt('UQpy_Batch_{0}.txt'.format(i+1), samples[lines, :], fmt='%0.5f')
        np.savetxt('UQpy_Batch_index_{0}.txt'.format(i+1), lines)
def save_csv(headers, param_values):
    """Write the sampled parameter values to 'UQpy_Samples.csv'.

    A 1-based run-number column is prepended and the header row is
    ['Run'] + headers.

    :param headers: sequence of parameter names, one per column
    :param param_values: 2D array of sampled values (rows are runs)
    """
    import csv
    run_numbers = np.arange(1, param_values.shape[0] + 1).astype(int)
    table = np.hstack((run_numbers.reshape(-1, 1), param_values))
    column_names = ['Run'] + list(headers)
    with open('UQpy_Samples.csv', "w") as output:
        writer = csv.writer(output, lineterminator='\n')
        writer.writerow(column_names)
        writer.writerows(table)
def save_txt(headers, param_values):
index = np.array(range(1, param_values.shape[0] + 1)).astype(int)
param_values = np.hstack((index.reshape(index.shape[0], 1), param_values))
expand_header | |
# coding: utf-8
"""
Module ``mock``
---------------
Wrapper to unittest.mock reducing the boilerplate when testing asyncio powered
code.
A mock can behave as a coroutine, as specified in the documentation of
:class:`~asynctest.mock.Mock`.
"""
import asyncio
import asyncio.coroutines
import contextlib
import enum
import functools
import inspect
import sys
import types
import unittest.mock
# From python 3.6, a sentinel object is used to mark coroutines (rather than
# a boolean) to prevent a mock/proxy object to return a truthy value.
# see: https://github.com/python/asyncio/commit/ea776a11f632a975ad3ebbb07d8981804aa292db
try:
    # Reuse asyncio's private sentinel so asyncio.iscoroutinefunction()
    # recognizes mocks flagged with it.
    _is_coroutine = asyncio.coroutines._is_coroutine
except AttributeError:
    # Older Pythons mark coroutines with a plain boolean flag instead.
    _is_coroutine = True
class _AsyncIterator:
"""
Wraps an iterator in an asynchronous iterator.
"""
def __init__(self, iterator):
self.iterator = iterator
def __aiter__(self):
return self
async def __anext__(self):
try:
return next(self.iterator)
except StopIteration:
pass
raise StopAsyncIteration
# magic methods which must be coroutine functions
async_magic_coroutines = ("__aenter__", "__aexit__", "__anext__")
# all magic methods used in an async context
_async_magics = async_magic_coroutines + ("__aiter__", )

# We use unittest.mock.MagicProxy which works well, but it's not aware that
# we want __aexit__ to return a falsy value by default.
# We add the entry in unittest internal dict as it will not change the
# normal behavior of unittest.
# NOTE(review): this mutates a private unittest.mock table process-wide and
# relies on that internal staying stable across Python versions.
unittest.mock._return_values["__aexit__"] = False
def _get_async_iter(mock):
    """
    Factory of ``__aiter__`` magic methods for a MagicMock.

    It creates a function which returns an asynchronous iterator based on the
    return value of ``mock.__aiter__``.

    Since __aiter__ used could be a coroutine in Python 3.5 and 3.6, we also
    support this case.

    See: https://www.python.org/dev/peps/pep-0525/#id23
    """
    def __aiter__():
        configured = mock.__aiter__._mock_return_value
        if configured is DEFAULT:
            # Nothing configured: iterate over an empty sequence.
            return _AsyncIterator(iter([]))
        return _AsyncIterator(iter(configured))

    if asyncio.iscoroutinefunction(mock.__aiter__):
        # Mirror the coroutine-ness of the mocked __aiter__.
        return asyncio.coroutine(__aiter__)

    return __aiter__
# Register the factory so MagicProxy builds our __aiter__ implementation.
unittest.mock._side_effect_methods["__aiter__"] = _get_async_iter

# Convert to sets for the membership tests and set arithmetic used below.
async_magic_coroutines = set(async_magic_coroutines)
_async_magics = set(_async_magics)

# This changes the behavior of unittest, but the change is minor and is
# probably better than overriding __set/get/del attr__ everywhere.
unittest.mock._all_magics |= _async_magics
def _raise(exception):
    """Raise *exception*; a helper usable where a statement is not allowed (e.g. side effects)."""
    raise exception
def _make_native_coroutine(coroutine):
"""
Wrap a coroutine (or any function returning an awaitable) in a native
coroutine.
"""
if inspect.iscoroutinefunction(coroutine):
# Nothing to do.
return coroutine
@functools.wraps(coroutine)
async def wrapper(*args, **kwargs):
return await coroutine(*args, **kwargs)
return wrapper
def _is_started(patching):
    """Return whether *patching* is currently active (handles both patcher kinds)."""
    if isinstance(patching, _patch_dict):
        # _patch_dict tracks its own started flag.
        return patching._is_started
    return unittest.mock._is_started(patching)
class FakeInheritanceMeta(type):
    """
    A metaclass which recreates the original inheritance model from
    unittest.mock.

    - NonCallableMock > NonCallableMagicMock
    - NonCallable > Mock
    - Mock > MagicMock
    """
    def __init__(self, name, bases, attrs):
        # Bind our custom __new__ so every instantiation goes through __new below.
        attrs['__new__'] = types.MethodType(self.__new, self)
        super().__init__(name, bases, attrs)

    @staticmethod
    def __new(cls, *args, **kwargs):
        # Each mock instance gets its own throwaway subclass, mirroring how
        # unittest.mock isolates per-instance magic methods.
        new = type(cls.__name__, (cls, ), {'__doc__': cls.__doc__})
        return object.__new__(new, *args, **kwargs)

    def __instancecheck__(cls, obj):
        # That's tricky, each type(mock) is actually a subclass of the actual
        # Mock type (see __new__)
        if super().__instancecheck__(obj):
            return True

        _type = type(obj)
        # Recreate unittest.mock's virtual inheritance relationships.
        if issubclass(cls, NonCallableMock):
            if issubclass(_type, (NonCallableMagicMock, Mock, )):
                return True

        if issubclass(cls, Mock) and not issubclass(cls, CoroutineMock):
            if issubclass(_type, (MagicMock, )):
                return True

        return False
def _get_is_coroutine(self):
    # Read straight from the instance dict because Mock intercepts attribute access.
    return self.__dict__['_mock_is_coroutine']
def _set_is_coroutine(self, value):
    """Flag (or unflag) the mock as a coroutine function."""
    # property setters and getters are overridden by Mock(), we need to
    # update the dict to add values
    normalized = _is_coroutine if value else False
    self.__dict__['_mock_is_coroutine'] = normalized
# _mock_add_spec() is the actual private implementation in unittest.mock, we
# override it to support coroutines in the metaclass.
def _mock_add_spec(self, spec, *args, **kwargs):
unittest.mock.NonCallableMock._mock_add_spec(self, spec, *args, **kwargs)
_spec_coroutines = []
for attr in dir(spec):
if asyncio.iscoroutinefunction(getattr(spec, attr)):
_spec_coroutines.append(attr)
self.__dict__['_spec_coroutines'] = _spec_coroutines
def _get_child_mock(self, *args, **kwargs):
    # Pick which mock class to use for an auto-created child attribute.
    _new_name = kwargs.get("_new_name")
    if _new_name in self.__dict__['_spec_coroutines']:
        # The spec declares this attribute as a coroutine function.
        return CoroutineMock(*args, **kwargs)

    _type = type(self)
    if issubclass(_type, MagicMock) and _new_name in async_magic_coroutines:
        # Async magic methods (__aenter__, __aexit__, __anext__) must be awaitable.
        klass = CoroutineMock
    elif issubclass(_type, CoroutineMock):
        # Children of a coroutine mock are plain (magic) mocks.
        klass = MagicMock
    elif not issubclass(_type, unittest.mock.CallableMixin):
        if issubclass(_type, unittest.mock.NonCallableMagicMock):
            klass = MagicMock
        elif issubclass(_type, NonCallableMock):
            klass = Mock
        # NOTE(review): if neither branch above matches, `klass` is unbound and
        # the call below raises UnboundLocalError — presumably unreachable, but
        # worth confirming.
    else:
        # Callable mock: fall back to the first real base class (the dynamic
        # per-instance subclass created by FakeInheritanceMeta.__new__).
        klass = _type.__mro__[1]
    return klass(*args, **kwargs)
class MockMetaMixin(FakeInheritanceMeta):
    """Metaclass mixin injecting asynctest's spec/child-mock handling into mock classes."""
    def __new__(meta, name, base, namespace):
        # Only patch the first class in a hierarchy using this metaclass;
        # subclasses inherit the injected attributes.
        if not any((isinstance(baseclass, meta) for baseclass in base)):
            # this ensures that inspect.iscoroutinefunction() doesn't return
            # True when testing a mock.
            code_mock = unittest.mock.NonCallableMock(spec_set=types.CodeType)
            code_mock.co_flags = 0
            namespace.update({
                '_mock_add_spec': _mock_add_spec,
                '_get_child_mock': _get_child_mock,
                '__code__': code_mock,
            })

        return super().__new__(meta, name, base, namespace)
class IsCoroutineArgMeta(MockMetaMixin):
    """Metaclass adding the ``is_coroutine`` property and setter plumbing to mock classes."""
    def __new__(meta, name, base, namespace):
        if not any((isinstance(baseclass, meta) for baseclass in base)):
            namespace.update({
                '_asynctest_get_is_coroutine': _get_is_coroutine,
                '_asynctest_set_is_coroutine': _set_is_coroutine,
                'is_coroutine': property(_get_is_coroutine, _set_is_coroutine,
                                         doc="True if the object mocked is a coroutine"),
                '_is_coroutine': property(_get_is_coroutine),
            })

            # Route `mock.is_coroutine = ...` through our setter; everything
            # else falls back to the class's original __setattr__.
            wrapped_setattr = namespace.get("__setattr__", base[0].__setattr__)

            def __setattr__(self, attrname, value):
                if attrname == 'is_coroutine':
                    self._asynctest_set_is_coroutine(value)
                else:
                    return wrapped_setattr(self, attrname, value)

            namespace['__setattr__'] = __setattr__

        return super().__new__(meta, name, base, namespace)
class AsyncMagicMixin:
    """
    Add support for async magic methods to :class:`MagicMock` and
    :class:`NonCallableMagicMock`.

    Actually, it's a shameless copy-paste of :class:`unittest.mock.MagicMixin`:
    when added to our classes, it will just do exactly what its
    :mod:`unittest` counterpart does, but for magic methods. It adds some
    behavior but should be compatible with future additions of
    :class:`MagicMock`.
    """
    # Magic methods are invoked as type(obj).__magic__(obj), as seen in
    # PEP-343 (with) and PEP-492 (async with)
    def __init__(self, *args, **kwargs):
        self._mock_set_async_magics()  # make magic work for kwargs in init
        unittest.mock._safe_super(AsyncMagicMixin, self).__init__(*args, **kwargs)
        self._mock_set_async_magics()  # fix magic broken by upper level init

    def _mock_set_async_magics(self):
        """Install (or strip) async magic-method proxies on this mock's per-instance type."""
        these_magics = _async_magics

        if getattr(self, "_mock_methods", None) is not None:
            # A spec restricts which magic methods may exist on this mock.
            these_magics = _async_magics.intersection(self._mock_methods)
            remove_magics = _async_magics - these_magics

            for entry in remove_magics:
                if entry in type(self).__dict__:
                    # remove unneeded magic methods
                    delattr(self, entry)

        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)

        _type = type(self)
        for entry in these_magics:
            # MagicProxy lazily builds the magic method on first access.
            setattr(_type, entry, unittest.mock.MagicProxy(entry, self))

    def mock_add_spec(self, *args, **kwargs):
        # Re-sync the async magics after the spec changes which methods exist.
        unittest.mock.MagicMock.mock_add_spec(self, *args, **kwargs)
        self._mock_set_async_magics()

    def __setattr__(self, name, value):
        _mock_methods = getattr(self, '_mock_methods', None)
        if _mock_methods is None or name in _mock_methods:
            if name in _async_magics:
                # Assigning an async magic: mirror unittest.mock.MagicMixin by
                # installing it on the per-instance type, not the instance.
                if not unittest.mock._is_instance_mock(value):
                    setattr(type(self), name,
                            unittest.mock._get_method(name, value))
                    original = value

                    def value(*args, **kwargs):
                        return original(self, *args, **kwargs)
                else:
                    unittest.mock._check_and_set_parent(self, value, None, name)
                    setattr(type(self), name, value)
                    self._mock_children[name] = value

                return object.__setattr__(self, name, value)

        unittest.mock._safe_super(AsyncMagicMixin, self).__setattr__(name, value)
# Notes about unittest.mock:
# - MagicMock > Mock > NonCallableMock (where ">" means inherits from)
# - when a mock instance is created, a new class (type) is created
# dynamically,
# - we *must* use magic or object's internals when we want to add our own
# properties, and often override __getattr__/__setattr__ which are used
# in unittest.mock.NonCallableMock.
class NonCallableMock(unittest.mock.NonCallableMock,
                      metaclass=IsCoroutineArgMeta):
    """
    Enhance :class:`unittest.mock.NonCallableMock` with features allowing to
    mock a coroutine function.

    If ``is_coroutine`` is set to ``True``, the :class:`NonCallableMock`
    object will behave so :func:`asyncio.iscoroutinefunction` will return
    ``True`` with ``mock`` as parameter.

    If ``spec`` or ``spec_set`` is defined and an attribute is get,
    :class:`~asynctest.CoroutineMock` is returned instead of
    :class:`~asynctest.Mock` when the matching spec attribute is a coroutine
    function.

    The test author can also specify a wrapped object with ``wraps``. In this
    case, the :class:`~asynctest.Mock` object behavior is the same as with an
    :class:`unittest.mock.Mock` object: the wrapped object may have methods
    defined as coroutine functions.

    See :class:`unittest.mock.NonCallableMock`
    """
    def __init__(self, spec=None, wraps=None, name=None, spec_set=None,
                 is_coroutine=None, parent=None, **kwargs):
        super().__init__(spec=spec, wraps=wraps, name=name, spec_set=spec_set,
                         parent=parent, **kwargs)
        # Record whether this mock should look like a coroutine function
        # (consumed by the is_coroutine property installed by the metaclass).
        self._asynctest_set_is_coroutine(is_coroutine)
class NonCallableMagicMock(AsyncMagicMixin, unittest.mock.NonCallableMagicMock,
                           metaclass=IsCoroutineArgMeta):
    """
    A version of :class:`~asynctest.MagicMock` that isn't callable.
    """
    def __init__(self, spec=None, wraps=None, name=None, spec_set=None,
                 is_coroutine=None, parent=None, **kwargs):
        super().__init__(spec=spec, wraps=wraps, name=name, spec_set=spec_set,
                         parent=parent, **kwargs)
        # Record whether this mock should look like a coroutine function
        # (see NonCallableMock).
        self._asynctest_set_is_coroutine(is_coroutine)
class Mock(unittest.mock.Mock, metaclass=MockMetaMixin):
    """
    Enhance :class:`unittest.mock.Mock` so it returns
    a :class:`~asynctest.CoroutineMock` object instead of
    a :class:`~asynctest.Mock` object where a method on a ``spec`` or
    ``spec_set`` object is a coroutine.

    For instance:

    >>> class Foo:
    ...     @asyncio.coroutine
    ...     def foo(self):
    ...         pass
    ...
    ...     def bar(self):
    ...         pass

    >>> type(asynctest.mock.Mock(Foo()).foo)
    <class 'asynctest.mock.CoroutineMock'>

    >>> type(asynctest.mock.Mock(Foo()).bar)
    <class 'asynctest.mock.Mock'>

    The test author can also specify a wrapped object with ``wraps``. In this
    case, the :class:`~asynctest.Mock` object behavior is the same as with an
    :class:`unittest.mock.Mock` object: the wrapped object may have methods
    defined as coroutine functions.

    If you want to mock a coroutine function, use :class:`CoroutineMock`
    instead.

    See :class:`~asynctest.NonCallableMock` for details about :mod:`asynctest`
    features, and :mod:`unittest.mock` for the comprehensive documentation
    about mocking.
    """
class MagicMock(AsyncMagicMixin, unittest.mock.MagicMock,
metaclass=MockMetaMixin):
"""
Enhance :class:`unittest.mock.MagicMock` so it returns
a :class:`~asynctest.CoroutineMock` object instead of
a :class:`~asynctest.Mock` object where a method on a ``spec`` or
``spec_set`` object is a coroutine.
If you want to mock a coroutine function, use :class:`CoroutineMock`
instead.
:class:`MagicMock` allows to mock ``__aenter__``, ``__aexit__``,
``__aiter__`` and ``__anext__``.
When mocking an asynchronous iterator, you can set the
``return_value`` of ``__aiter__`` to an iterable to define the list of
values to be returned during iteration.
You can not mock ``__await__``. If you want to mock an object implementing
__await__, :class:`CoroutineMock` will likely be sufficient.
see :class:`~asynctest.Mock`.
.. versionadded:: 0.11
support of asynchronous | |
# From the PyPy repository (mark-and-compact GC module).
from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup
from pypy.rpython.memory.gc.base import MovingGCBase
from pypy.rpython.memory.gc import env
from pypy.rlib.debug import ll_assert, have_debug_prints
from pypy.rlib.debug import debug_print, debug_start, debug_stop
from pypy.rpython.memory.support import get_address_stack, get_address_deque
from pypy.rpython.memory.support import AddressDict
from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.objectmodel import we_are_translated, running_on_llinterp
from pypy.rpython.lltypesystem import rffi
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rlib.rarithmetic import is_valid_int
# Mark'n'compact garbage collector
#
# main point of this GC is to save as much memory as possible
# (not to be worse than semispace), but avoid having peaks of
# memory during collection. Inspired, at least partly by squeak's
# garbage collector
# so, the idea as now is:
# this gc works more or less like semispace, but has some essential
# differences. The main difference is that we have separate phases of
# marking and assigning pointers, hence order of objects is preserved.
# This means we can reuse the same space, overwriting it as we collect.
# so the algorithm itself is performed in 3 stages (modulo weakrefs and
# finalizers):
# 1. We mark alive objects
# 2. We walk all objects and assign forward pointers in the same order,
# also updating all references
# 3. We compact the space by moving. We use 'arena_new_view' trick, which
# looks like new space to tests, but compiles to the same pointer.
# Also we use raw_memmove in case the object overlaps with its destination.
# After each collection, we bump 'next_collect_after' which is a marker
# where to start each collection. It should be exponential (but less
# than 2) from the size occupied by objects so far.
# field optimization - we don't need forward pointer and flags at the same
# time. Instead we copy the TIDs in a list when we know how many objects are
# alive, and store the forward pointer in the old object header.
# GC flags live in the upper half of the 'tid' header field; the lower
# half holds the (half-word) type id.
first_gcflag_bit = LONG_BIT//2
first_gcflag = 1 << first_gcflag_bit
GCFLAG_HASHTAKEN = first_gcflag << 0      # someone already asked for the hash
GCFLAG_HASHFIELD = first_gcflag << 1      # we have an extra hash field
# note that only the first 2 bits are preserved during a collection!
GCFLAG_MARKBIT = intmask(first_gcflag << (LONG_BIT//2-1))
assert GCFLAG_MARKBIT < 0     # should be 0x80000000
# Low-bit copies of the two hash flags: used while the tids live in the
# half-word TID backup array during a collection (see comment above).
GCFLAG_SAVED_HASHTAKEN = GCFLAG_HASHTAKEN >> first_gcflag_bit
GCFLAG_SAVED_HASHFIELD = GCFLAG_HASHFIELD >> first_gcflag_bit
# Type and layout of the temporary TID backup array.
TID_TYPE = llgroup.HALFWORD
BYTES_PER_TID = rffi.sizeof(TID_TYPE)
TID_BACKUP = rffi.CArray(TID_TYPE)
def translated_to_c():
    """Return whether we run as a real compiled-to-C build.

    True only when the program is translated AND not merely being
    emulated on the llinterp.
    """
    translated = we_are_translated()
    return translated and not running_on_llinterp
class MarkCompactGC(MovingGCBase):
    # Object header: a single signed word combining type id and flags.
    HDR = lltype.Struct('header', ('tid', lltype.Signed))
    typeid_is_in_field = 'tid'
    withhash_flag_is_in_field = 'tid', GCFLAG_HASHFIELD
    # ^^^ all prebuilt objects have GCFLAG_HASHTAKEN, but only some have
    # GCFLAG_HASHFIELD (and then they are one word longer).

    # The default space size is 1.9375 GB, i.e. almost 2 GB, allocated as
    # a big mmap.  The process does not actually consume that space until
    # needed, of course.
    TRANSLATION_PARAMS = {'space_size': int((1 + 15.0/16)*1024*1024*1024),
                          'min_next_collect_after': 16*1024*1024}   # 16MB

    # Memory handed out by malloc_fixedsize() is NOT zeroed (there is a
    # separate malloc_fixedsize_clear() that clears it).
    malloc_zero_filled = False
    inline_simple_malloc = True
    inline_simple_malloc_varsize = True
    #total_collection_time = 0.0
    #total_collection_count = 0

    # 'free' is the bump-allocation pointer; 'next_collect_after' counts
    # down the bytes that may still be allocated before the next
    # collection.  Both get real values in setup().
    free = NULL
    next_collect_after = -1
def __init__(self, config, space_size=4096,
             min_next_collect_after=128, **kwds):
    # The markcompact GC is currently known-broken: skip out of any
    # test that tries to instantiate it.  skip() raises, so the code
    # below it only runs once the skip is removed.
    import py
    py.test.skip("the 'markcompact' gc needs fixing for custom tracers")
    #
    MovingGCBase.__init__(self, config, **kwds)
    self.space_size = space_size
    self.min_next_collect_after = min_next_collect_after
def next_collection(self, used_space, num_objects_so_far, requested_size):
    """Return how many bytes may be allocated before the next collection.

    'used_space' is the number of bytes occupied by live objects;
    'num_objects_so_far' adds the per-object TID-backup reservation
    (BYTES_PER_TID each); 'requested_size' is the allocation that
    triggered the current collection (0 when called from setup()).

    The value returned guarantees used_space + result <= space_size,
    with BYTES_PER_TID*num_objects_so_far included in used_space.
    Normally the result is also at least requested_size, unless we
    are out of memory.
    """
    used_space += BYTES_PER_TID * num_objects_so_far
    ll_assert(used_space <= self.space_size,
              "used_space + num_objects_so_far overflow")
    try:
        # Grow the allowance by 2/3 of the live data plus the pending
        # request.  (RPython integer arithmetic can overflow here.)
        # Renamed from 'next', which shadowed the builtin of that name.
        next_after = (used_space // 3) * 2 + requested_size
    except OverflowError:
        next_after = self.space_size
    # Clamp into [min_next_collect_after, space actually remaining].
    if next_after < self.min_next_collect_after:
        next_after = self.min_next_collect_after
    if next_after > self.space_size - used_space:
        next_after = self.space_size - used_space
    return next_after
def setup(self):
    """Allocate the big arena and initialize the GC runtime state.

    The space size and the minimum collection threshold can be
    overridden through the PYPY_MARKCOMPACTGC_MAX and
    PYPY_MARKCOMPACTGC_MIN environment variables; values below 4096
    are ignored.
    """
    envsize = env.read_from_env('PYPY_MARKCOMPACTGC_MAX')
    if envsize >= 4096:
        self.space_size = envsize & ~4095    # round down to a 4096 multiple
    mincollect = env.read_from_env('PYPY_MARKCOMPACTGC_MIN')
    if mincollect >= 4096:
        self.min_next_collect_after = mincollect

    #self.program_start_time = time.time()
    self.space = llarena.arena_malloc(self.space_size, False)
    if not self.space:
        # NOTE(review): CannotAllocateGCArena is not among the imports
        # visible in this chunk -- presumably defined in a base/gc
        # module; confirm.
        raise CannotAllocateGCArena
    self.free = self.space          # bump-allocation pointer
    MovingGCBase.setup(self)
    self.objects_with_finalizers = self.AddressDeque()
    self.tid_backup = lltype.nullptr(TID_BACKUP)
    # Initial allowance before the first collection.
    self.next_collect_after = self.next_collection(0, 0, 0)
def init_gc_object(self, addr, typeid16, flags=0):
    """Write a fresh GC header (combined type id + flags) at 'addr'."""
    header = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
    header.tid = self.combine(typeid16, flags)
def init_gc_object_immortal(self, addr, typeid16, flags=0):
    """Write the GC header of a prebuilt (immortal) object at 'addr'.

    All prebuilt objects get GCFLAG_MARKBIT permanently set, which is
    convenient to make the GC always think that they survive the
    current collection; they also get GCFLAG_HASHTAKEN.
    """
    prebuilt_flags = flags | GCFLAG_HASHTAKEN | GCFLAG_MARKBIT
    header = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
    header.tid = self.combine(typeid16, prebuilt_flags)
def _get_memory(self, totalsize):
    # Bump-pointer allocation of 'totalsize' bytes, collecting first
    # when the allowance runs out.
    # also counts the space that will be needed during the following
    # collection to store the TID
    requested_size = raw_malloc_usage(totalsize) + BYTES_PER_TID
    self.next_collect_after -= requested_size
    if self.next_collect_after < 0:
        # allowance exhausted: collect (or raise MemoryError); this
        # returns the new value of self.free
        result = self.obtain_free_space(requested_size)
    else:
        result = self.free
    # advance the bump pointer on both paths
    self.free += totalsize
    llarena.arena_reserve(result, totalsize)
    return result
_get_memory._always_inline_ = True
def _get_totalsize_var(self, nonvarsize, itemsize, length):
    # Compute the rounded-up total size of a varsized object,
    # raising MemoryError on any overflow.
    try:
        varsize = ovfcheck(itemsize * length)
    except OverflowError:
        raise MemoryError
    # Careful to detect overflows.  The following works even if varsize
    # is almost equal to sys.maxint; moreover, self.space_size is known
    # to be at least 4095 bytes smaller than sys.maxint, so this function
    # always raises instead of returning an integer >= sys.maxint-4095.
    if (raw_malloc_usage(varsize) > self.space_size -
                                    raw_malloc_usage(nonvarsize)):
        raise MemoryError
    return llarena.round_up_for_allocation(nonvarsize + varsize)
_get_totalsize_var._always_inline_ = True
def _setup_object(self, result, typeid16, has_finalizer):
    """Initialize the header of a freshly allocated block and return
    the object as a GCREF (pointing just past the GC header)."""
    size_gc_header = self.gcheaderbuilder.size_gc_header
    self.init_gc_object(result, typeid16)
    obj = result + size_gc_header
    if has_finalizer:
        # remember it so its finalizer can be run when it dies
        self.objects_with_finalizers.append(obj)
    return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
_setup_object._always_inline_ = True
def malloc_fixedsize(self, typeid16, size,
                     has_finalizer=False, contains_weakptr=False):
    """Allocate a fixed-size object; the memory is NOT zero-filled."""
    totalsize = self.gcheaderbuilder.size_gc_header + size
    return self._setup_object(self._get_memory(totalsize),
                              typeid16, has_finalizer)
def malloc_fixedsize_clear(self, typeid16, size,
                           has_finalizer=False, contains_weakptr=False):
    """Allocate a fixed-size object and zero-fill it."""
    totalsize = self.gcheaderbuilder.size_gc_header + size
    block = self._get_memory(totalsize)
    llmemory.raw_memclear(block, totalsize)
    return self._setup_object(block, typeid16, has_finalizer)
def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                         offset_to_length):
    """Allocate a zero-filled varsized object and store its length."""
    size_gc_header = self.gcheaderbuilder.size_gc_header
    totalsize = self._get_totalsize_var(size_gc_header + size,
                                        itemsize, length)
    block = self._get_memory(totalsize)
    llmemory.raw_memclear(block, totalsize)
    # write the 'length' field at its fixed offset inside the object
    (block + size_gc_header + offset_to_length).signed[0] = length
    return self._setup_object(block, typeid16, False)
def obtain_free_space(self, requested_size):
    """Make room for 'requested_size' bytes by collecting, and return
    the (new) allocation pointer.

    Collection is retried as long as finalizers actually ran, since
    they may have released memory; otherwise MemoryError is raised.
    Before setup() is complete (self.free == NULL) a raw, leaked
    block is handed out instead.
    """
    if self.free == NULL:
        return self._emergency_initial_block(requested_size)
    while True:
        ran_finalizers = self.markcompactcollect(requested_size)
        self.next_collect_after -= requested_size
        if self.next_collect_after >= 0:
            return self.free                # enough room now
        if not ran_finalizers:
            raise MemoryError
        # finalizers executed and may have freed memory: collect again
obtain_free_space._dont_inline_ = True
def _emergency_initial_block(self, requested_size):
    # xxx before the GC is fully setup, we might get there.  Hopefully
    # we will only allocate a couple of strings, e.g. in read_from_env().
    # Just allocate them raw and leak them.
    debug_start("gc-initial-block")
    debug_print("leaking", requested_size, "bytes")
    debug_stop("gc-initial-block")
    # deliberately never freed
    return llmemory.raw_malloc(requested_size)
def collect(self, gen=0):
    """Run a full mark-&-compact collection.

    'gen' is accepted for interface compatibility with generational
    collectors and ignored: this GC has a single generation.
    """
    self.markcompactcollect()
def markcompactcollect(self, requested_size=0):
    """Perform one full mark-&-compact collection.

    'requested_size' is the pending allocation that triggered the
    collection (0 for an explicit collect()).  Returns True if at
    least one finalizer was executed, False otherwise.  Raises
    MemoryError when, even after compaction, the allowance for
    further allocation is negative.
    """
    self.debug_collect_start(requested_size)
    self.debug_check_consistency()
    #
    # Mark alive objects
    #
    self.to_see = self.AddressDeque()
    self.trace_from_roots()
    self.to_see.delete()
    #
    # Prepare new views on the same memory
    #
    toaddr = llarena.arena_new_view(self.space)
    # upper bound on the number of objects whose tids we may back up
    maxnum = self.space_size - (self.free - self.space)
    # NOTE(review): '/=' relies on Python-2 integer division here;
    # under Python 3 semantics this would need '//=' -- confirm.
    maxnum /= BYTES_PER_TID
    llarena.arena_reserve(self.free, llmemory.sizeof(TID_BACKUP, maxnum))
    self.tid_backup = llmemory.cast_adr_to_ptr(self.free,
                                               lltype.Ptr(TID_BACKUP))
    #
    # Walk all objects and assign forward pointers in the same order,
    # also updating all references
    #
    self.update_forward_pointers(toaddr, maxnum)
    if (self.run_finalizers.non_empty() or
        self.objects_with_finalizers.non_empty()):
        self.update_run_finalizers()
    self.update_objects_with_id()
    self.compact()
    #
    # Collection done: drop the tid backup, move the allocation
    # pointer to the end of the compacted data and recompute the
    # allowance until the next collection.
    self.tid_backup = lltype.nullptr(TID_BACKUP)
    self.free = self.finaladdr
    self.next_collect_after = self.next_collection(self.finaladdr - toaddr,
                                                   self.num_alive_objs,
                                                   requested_size)
    #
    # When not compiled to C, really swap to the new arena view so
    # that tests see the move.
    if not translated_to_c():
        remaining_size = (toaddr + self.space_size) - self.finaladdr
        llarena.arena_reset(self.finaladdr, remaining_size, False)
        llarena.arena_free(self.space)
        self.space = toaddr
    #
    self.debug_check_consistency()
    self.debug_collect_finish()
    if self.next_collect_after < 0:
        raise MemoryError
    #
    if self.run_finalizers.non_empty():
        self.execute_finalizers()
        return True       # executed some finalizers
    else:
        return False      # no finalizer executed
def debug_collect_start(self, requested_size):
    """Emit the header of the per-collection debug log section.

    The always-true 'if 1:' guard and the commented-out timing code
    that used to live here were removed; a have_debug_prints() guard
    can be reinstated if logging overhead ever matters.
    """
    debug_start("gc-collect")
    debug_print()
    debug_print(".----------- Full collection -------------------")
    debug_print("| requested size:",
                requested_size)
def debug_collect_finish(self):
    """Emit the trailer of the per-collection debug log (a few size
    statistics) and close the 'gc-collect' debug section.

    The always-true 'if 1:' guard and the commented-out wall-clock
    timing statistics were removed as dead code.
    """
    debug_print("| total space size          ",
                self.space_size)
    debug_print("| number of objects alive   ",
                self.num_alive_objs)
    debug_print("| used space size           ",
                self.free - self.space)
    debug_print("| next collection after     ",
                self.next_collect_after)
    debug_print("`----------------------------------------------")
    debug_stop("gc-collect")
def update_run_finalizers(self):
if self.run_finalizers.non_empty(): # uncommon case
run_finalizers = self.AddressDeque()
while self.run_finalizers.non_empty():
obj = self.run_finalizers.popleft()
run_finalizers.append(self.get_forwarding_address(obj))
self.run_finalizers.delete()
self.run_finalizers = run_finalizers
#
objects_with_finalizers = self.AddressDeque()
while self.objects_with_finalizers.non_empty():
obj = self.objects_with_finalizers.popleft()
objects_with_finalizers.append(self.get_forwarding_address(obj))
| |
" AACGM: [{:.3f}, {:.3f}, {:d}]\n".format(
self.aacgm_lat[i], self.aacgm_mlt[i], dind)
out += " OCB: [{:.3f}, {:.3f}, {:d}]\n".format(
self.ocb_lat[i], self.ocb_mlt[i], self.ocb_ind[i])
elif self.ocb_ind.shape == ():
for i, dind in enumerate(self.dat_ind):
out += " AACGM: [{:.3f}, {:.3f}, {:d}]\n".format(
self.aacgm_lat[i], self.aacgm_mlt[i], dind)
if self.ocb_lat.shape == () and np.isnan(self.ocb_lat):
out += " OCB: [nan, nan, {:d}]\n".format(
self.ocb_ind)
else:
out += " OCB: [{:.3f}, {:.3f}, {:d}]\n".format(
self.ocb_lat[i], self.ocb_mlt[i], self.ocb_ind)
else:
out += " AACGM: [{:.3f}, {:.3f}, {:d}]\n".format(
self.aacgm_lat, self.aacgm_mlt, self.dat_ind)
for i, oind in enumerate(self.ocb_ind):
out += " OCB: [{:.3f}, {:.3f}, {:d}]\n".format(
self.ocb_lat[i], self.ocb_mlt[i], oind)
out += "\n-------------------------------------------\n"
if self.aacgm_mag.shape == () and self.ocb_mag.shape == ():
out += "Value: Magnitude [N, E, Z]\n"
out += "AACGM: {:.3g} [{:.3g}".format(self.aacgm_mag, self.aacgm_n)
out += ", {:.3g}, {:.3g}]\n".format(self.aacgm_e, self.aacgm_z)
if not np.isnan(self.ocb_mag):
out += " OCB: {:.3g} [{:.3g}".format(self.ocb_mag, self.ocb_n)
out += ", {:.3g}, {:.3g}]\n".format(self.ocb_e, self.ocb_z)
else:
out += "Value: Magnitude [N, E, Z] Index\n"
for i, mag in enumerate(self.ocb_mag):
if self.aacgm_mag.shape == () and i == 0:
out += "AACGM: {:.3g} [".format(self.aacgm_mag)
out += "{:.3g}, {:.3g}, {:.3g}] {:d}\n".format(
self.aacgm_n, self.aacgm_e, self.aacgm_z, self.dat_ind)
elif self.aacgm_mag.shape != ():
out += "AACGM: {:.3g} [".format(self.aacgm_mag[i])
out += "{:.3g}, {:.3g}, {:.3g}] ".format(
self.aacgm_n[i], self.aacgm_e[i], self.aacgm_z[i])
out += "{:d}\n".format(self.dat_ind[i])
if not np.isnan(mag):
out += " OCB: {:.3g} [{:.3g}, ".format(mag, self.ocb_n[i])
out += "{:.3g}, ".format(self.ocb_e[i])
out += "{:.3g}] {:d}\n".format(
self.ocb_z[i], self.ocb_ind if self.ocb_ind.shape == ()
else self.ocb_ind[i])
out += "\n-------------------------------------------\n"
if self.scale_func is None:
out += "No magnitude scaling function provided\n"
else:
out += "Scaling function: {:s}\n".format(self.scale_func.__name__)
return out
def __str__(self):
    """ Provide readable representation of the DataVector object
    """
    return self.__repr__()
def set_ocb(self, ocb, scale_func=None):
    """ Set the OCBoundary values for this data point

    Parameters
    ----------
    ocb : (OCBoundary)
        Open Closed Boundary class object
    scale_func : (function)
        Function for scaling AACGM magnitude with arguments:
        [measurement value, measurement AACGM latitude (degrees),
        measurement OCB latitude (degrees)]
        Not necessary if defined earlier or no scaling is needed.
        (default=None)

    Updates
    -------
    self.unscaled_r : (float or array-like)
        Radius of polar cap in degrees
    self.scaled_r : (float)
        Radius of normalised OCB polar cap in degrees
    self.ocb_n : (float or array-like)
        Vector OCB North component
    self.ocb_e : (float or array-like)
        Vector OCB East component
    self.ocb_z : (float or array-like)
        Vector OCB vertical component (positive downward)
    self.ocb_mag : (float or array-like)
        Vector OCB magnitude
    self.ocb_lat : (float or array-like)
        Vector OCB latitude, if not updated already (degrees)
    self.ocb_mlt : (float or array-like)
        Vector OCB MLT, if not updated already (hours)
    self.r_corr : (float or array-like)
        OCB radius correction for vector location (degrees)
    self.ocb_quad : (int or array-like)
        OCB pole AACGM quadrant
    self.vec_quad : (int or array-like)
        Vector AACGM quadrant
    self.pole_angle : (float or array-like)
        Angle at the vector in the triangle formed by the poles and vector
        (degrees)
    self.aacgm_naz : (float or array-like)
        AACGM north azimuth angle (degrees)
    self.ocb_aacgm_lat : (float or array-like)
        AACGM latitude of the OCB pole (degrees)
    self.ocb_aacgm_mlt : (float or array-like)
        AACGM MLT of the OCB pole (hours)
    self.scale_func : (function)
        Function for scaling AACGM magnitude with arguments:
        [measurement value, unscaled polar cap radius (degrees),
        scaled polar cap radius (degrees)]
        Not necessary if defined earlier or if no scaling is needed.
    """
    # Initialize the OCB index
    ocb.rec_ind = self.ocb_ind

    # If the OCB vector coordinates weren't included in the initial info,
    # update them here
    if(np.all(np.isnan(self.ocb_lat)) or np.all(np.isnan(self.ocb_mlt)) or
       np.all(np.isnan(self.r_corr))):
        # Because the OCB and AACGM magnetic field are both time dependent,
        # can't call this function with multiple OCBs
        if self.ocb_ind.shape == ():
            (self.ocb_lat, self.ocb_mlt,
             self.r_corr) = ocb.normal_coord(self.aacgm_lat,
                                             self.aacgm_mlt)
        else:
            # The loop target deliberately updates ocb.rec_ind so that
            # normal_coord() uses boundary record i on each pass
            for i, ocb.rec_ind in enumerate(self.ocb_ind):
                if self.ocb_ind.shape == self.dat_ind.shape:
                    (self.ocb_lat[i], self.ocb_mlt[i],
                     self.r_corr[i]) = ocb.normal_coord(self.aacgm_lat[i],
                                                        self.aacgm_mlt[i])
                else:
                    # single data location, multiple boundary records
                    (self.ocb_lat[i], self.ocb_mlt[i],
                     self.r_corr[i]) = ocb.normal_coord(self.aacgm_lat,
                                                        self.aacgm_mlt)

    # Exit if the OCB coordinates can't be calculated at this location
    if(np.all(np.isnan(self.ocb_lat)) or np.all(np.isnan(self.ocb_mlt)) or
       np.all(np.isnan(self.r_corr))):
        return

    # Set the AACGM coordinates of the OCB pole
    self.unscaled_r = ocb.r[self.ocb_ind] + self.r_corr
    self.scaled_r = 90.0 - abs(ocb.boundary_lat)
    self.ocb_aacgm_mlt = ocbpy.ocb_time.deg2hr(ocb.phi_cent[self.ocb_ind])
    self.ocb_aacgm_lat = 90.0 - ocb.r_cent[self.ocb_ind]

    # Get the angle at the data vector appended by the AACGM and OCB poles
    self.calc_vec_pole_angle()

    # Set the OCB and Vector quadrants
    if np.any(~np.isnan(self.pole_angle)):
        self.define_quadrants()

        # Set the scaling function
        if self.scale_func is None:
            if scale_func is None:
                # This is not necessarily a bad thing, if the value does
                # not need to be scaled.
                ocbpy.logger.info("no scaling function provided")
            else:
                self.scale_func = scale_func

        # Assign the OCB vector default values and location.  Will also
        # update the AACGM north azimuth of the vector.
        self.scale_vector()
    return
def _pick_quadrant(current, quad_masks):
    """Resolve a quadrant label (1-4) from four boolean masks.

    Parameters
    ----------
    current : (int or array-like)
        Existing quadrant value (0-d) or array to update in place
    quad_masks : (tuple)
        Four boolean masks, one per quadrant, assumed mutually exclusive

    Returns
    -------
    (array-like)
        For scalar input, the quadrant whose mask is entirely True
        (unchanged when none is); for array input, the same array with
        elements assigned from each mask.
    """
    if np.asarray(current).shape == ():
        for quad, mask in enumerate(quad_masks, start=1):
            if np.all(mask):
                return np.asarray(quad)
        return current
    for quad, mask in enumerate(quad_masks, start=1):
        current[mask] = quad
    return current

def define_quadrants(self):
    """ Find the MLT quadrants (in AACGM coordinates) for the OCB pole
    and data vector

    Requires
    --------
    self.ocb_aacgm_mlt : (float or array-like)
        OCB pole MLT in AACGM coordinates in hours
    self.aacgm_mlt : (float or array-like)
        Vector AACGM MLT in hours
    self.pole_angle : (float or array-like)
        vector angle in poles-vector triangle in degrees

    Updates
    -------
    self.ocb_quad : (int or array-like)
        OCB pole quadrant
    self.vec_quad : (int or array-like)
        Vector quadrant

    Notes
    -----
    North (N) and East (E) are defined by the AACGM directions centred on
    the data vector location, assuming vertical is positive downwards
    Quadrants: 1 [N, E]; 2 [N, W]; 3 [S, W]; 4 [S, E]

    Raises
    ------
    ValueError
        If the required input is undefined
    """
    # Cast the input as arrays
    self.ocb_aacgm_mlt = np.asarray(self.ocb_aacgm_mlt)
    self.aacgm_mlt = np.asarray(self.aacgm_mlt)
    self.pole_angle = np.asarray(self.pole_angle)

    # Test input
    if np.all(np.isnan(self.ocb_aacgm_mlt)):
        raise ValueError("OCB pole location required")
    if np.all(np.isnan(self.aacgm_mlt)):
        raise ValueError("Vector AACGM location required")
    if np.all(np.isnan(self.pole_angle)):
        raise ValueError("vector angle in poles-vector triangle required")

    # Determine where the OCB pole is relative to the data vector,
    # normalising the MLT difference into [0, 24) hours
    ocb_adj_mlt = self.ocb_aacgm_mlt - self.aacgm_mlt
    neg_mask = (np.less(ocb_adj_mlt, 0.0, where=~np.isnan(ocb_adj_mlt))
                & ~np.isnan(ocb_adj_mlt))
    while np.any(neg_mask):
        if ocb_adj_mlt.shape == ():
            ocb_adj_mlt += 24.0
            neg_mask = [False]
        else:
            ocb_adj_mlt[neg_mask] += 24.0
            neg_mask = (np.less(ocb_adj_mlt, 0.0,
                                where=~np.isnan(ocb_adj_mlt))
                        & ~np.isnan(ocb_adj_mlt))
    large_mask = (np.greater_equal(abs(ocb_adj_mlt), 24.0,
                                   where=~np.isnan(ocb_adj_mlt))
                  & ~np.isnan(ocb_adj_mlt))
    if np.any(large_mask):
        if ocb_adj_mlt.shape == ():
            ocb_adj_mlt -= 24.0 * np.sign(ocb_adj_mlt)
        else:
            ocb_adj_mlt[large_mask] -= 24.0 * np.sign(
                ocb_adj_mlt[large_mask])

    # Find the quadrant in which the OCB pole lies (masks ordered
    # quadrant 1 through 4)
    nan_mask = (~np.isnan(self.pole_angle) & ~np.isnan(ocb_adj_mlt))
    quad_masks = (
        (np.less(self.pole_angle, 90.0, where=nan_mask)
         & np.less(ocb_adj_mlt, 12.0, where=nan_mask) & nan_mask),
        (np.less(self.pole_angle, 90.0, where=nan_mask)
         & np.greater_equal(ocb_adj_mlt, 12.0, where=nan_mask) & nan_mask),
        (np.greater_equal(self.pole_angle, 90.0, where=nan_mask)
         & np.greater_equal(ocb_adj_mlt, 12.0, where=nan_mask) & nan_mask),
        (np.greater_equal(self.pole_angle, 90.0, where=nan_mask)
         & np.less(ocb_adj_mlt, 12.0, where=nan_mask) & nan_mask))
    self.ocb_quad = _pick_quadrant(self.ocb_quad, quad_masks)

    # Now determine which quadrant the vector is pointed into
    nan_mask = (~np.isnan(self.aacgm_n) & ~np.isnan(self.aacgm_e))
    quad_masks = (
        (np.greater_equal(self.aacgm_n, 0.0, where=nan_mask)
         & np.greater_equal(self.aacgm_e, 0.0, where=nan_mask) & nan_mask),
        (np.greater_equal(self.aacgm_n, 0.0, where=nan_mask)
         & np.less(self.aacgm_e, 0.0, where=nan_mask) & nan_mask),
        (np.less(self.aacgm_n, 0.0, where=nan_mask)
         & np.less(self.aacgm_e, 0.0, where=nan_mask) & nan_mask),
        (np.less(self.aacgm_n, 0.0, where=nan_mask)
         & np.greater_equal(self.aacgm_e, 0.0, where=nan_mask) & nan_mask))
    self.vec_quad = _pick_quadrant(self.vec_quad, quad_masks)
    return
def scale_vector(self):
""" Normalise a variable proportional to the curl of the electric field.
Requires
--------
self.ocb_lat : (float or array-like)
OCB latitude in degrees
self.ocb_mlt : (float or array-like)
OCB MLT in hours
self.ocb_aacgm_mlt : (float or array-like)
OCB pole MLT in AACGM coordinates in hours
self.pole_angle : (float or array-like)
vector angle in poles-vector triangle
Updates
-------
ocb_n : (float or array-like)
OCB scaled north component
ocb_e : (float or array-like)
OCB scaled east component
ocb_z : (float or array-like)
OCB scaled vertical component
ocb_mag : (float or array-like)
OCB scaled magnitude
Raises
------
ValueError
If the required input is not defined
"""
# Ensure the input is array-like
self.ocb_lat = np.asarray(self.ocb_lat)
self.ocb_mlt = np.asarray(self.ocb_mlt)
self.ocb_aacgm_mlt = np.asarray(self.ocb_aacgm_mlt)
self.pole_angle = np.asarray(self.pole_angle)
self.aacgm_n = np.asarray(self.aacgm_n)
self.aacgm_e = | |
<filename>software_kb/merging/populate_from_mentions.py
'''
Populate the staging area graph from the imported software-mention documents
'''
import os
import json
from arango import ArangoClient
from populate_staging_area import StagingArea
import logging
import logging.handlers
from tqdm import tqdm
def populate(stagingArea):
    """Populate the staging area from the imported software-mention documents.

    Requires the 'mentions' ArangoDB database to have been created by a
    prior import step; logs an error and returns without doing anything
    otherwise.

    Parameters
    ----------
    stagingArea : StagingArea
        Staging-area wrapper holding the Arango client, system db and config
    """
    database_name_mentions = "mentions"
    print("Populate staging area from software mention import")
    if not stagingArea.sys_db.has_database(database_name_mentions):
        logging.error("Software mention import database does not exist: you need to first import the software mention resources")
        # bug fix: previously execution fell through and tried to open
        # the missing database anyway; bail out instead
        return
    stagingArea.db = stagingArea.client.db(database_name_mentions,
                                           username=stagingArea.config['arangodb']['arango_user'],
                                           password=stagingArea.config['arangodb']['arango_pwd'])
    populate_mentions(stagingArea, stagingArea.get_source(database_name_mentions))
def populate_mentions(stagingArea, source_ref):
'''
Software mentions at this stage are all represented as independent software entity (very light-weight and with
the few extracted attributes). The information related to the mention in context are represented with the edge
relation "citations", with a "quotes work" (P6166) property to store the software (=work) mentioned and "quotation"
(P7081) for storing the whole context of mention (the target sentence).
Other relations built are funding (via Crossref funders) and references.
'''
# given the possible number of documents, we use pagination rather than a large ttl
cursor = stagingArea.db.aql.execute(
'FOR doc IN documents RETURN doc', full_count=True
)
stats = cursor.statistics()
total_results = 0
if 'fullCount' in stats:
total_results = stats['fullCount']
page_size = 1000
nb_pages = (total_results // page_size)+1
print("entries:", total_results, ", nb. steps:", nb_pages)
for page_rank in tqdm(range(0, nb_pages)):
cursor = stagingArea.db.aql.execute(
'FOR doc IN documents LIMIT ' + str(page_rank*page_size) + ', ' + str(page_size) + ' RETURN doc', ttl=3600
)
for document in cursor:
# document as document vertex collection
local_doc = stagingArea.init_entity_from_template("document", source=source_ref)
if local_doc is None:
raise("cannot init document entity from default template")
local_doc['_key'] = document["_key"]
local_doc['_id'] = "documents/" + document["_key"]
# document metadata stays as they are (e.g. full CrossRef record)
local_doc['metadata'] = document['metadata']
if "DOI" in document['metadata']:
local_doc['index_doi'] = document['metadata']['DOI'].lower()
# unfortunately the casing of the key DOI field is unreliable
if "doi" in document['metadata']:
local_doc['index_doi'] = document['metadata']['doi'].lower()
if "title" in document['metadata'] and len(document['metadata']['title'])>0 and 'author' in document['metadata'] and len(document['metadata']['author'])>0:
local_title = document['metadata']['title']
local_author = None
if 'author' in document['metadata']:
# we normally always have an author field
local_author = document['metadata']['author']
if local_author != None and local_title != None:
local_title_author_key = stagingArea.title_author_key(local_title, local_author)
if local_title_author_key != None and len(local_title_author_key)>0:
local_doc['index_title_author'] = local_title_author_key
if not stagingArea.staging_graph.has_vertex(local_doc["_id"]):
stagingArea.staging_graph.insert_vertex("documents", local_doc)
# there are two relations to be built at this level:
# - authorship based on "author" metadata field (edge "actor" from "persons" to "documents")
# -> as we consider here text-mined documents, we might better not important every authors as entities at this stage
# and keep only authors from key references cited together with software in mention
# - funding based on crossref "funder" metadata field (edge "funding" from "organizations" to "documents")
'''
if 'funder' in document['metadata'] and len(document['metadata']['funder'])>0:
for funder in document['metadata']['funder']:
# in WorkFunder, funder is defined by 'name', a 'DOI' (uppercase here, related to the funder),
# 'country' (conversion from medline/pubmed)
# funding is defined by 'award' [array] (optional)
# the DOI here contains thefunder id and it should make possible to get a full CrossRef funder
# entry /funders/{id}
# DOI 10.13039/100004440, funder id is 100004440
# https://api.crossref.org/funders/100004440/ -> Wellcome
# apparently 10.13039/ is the prefix for all funders?
funderID = None
if "DOI" in funder:
funderDOI = funder['DOI']
ind = funderDOI.find('/')
if ind != -1:
funderID = funderDOI[ind+1:]
if funderID == None:
continue
# full funder record at Crossref
# Crossref funder ID is P3153
# create an organization entity, if not already present with this funder identifier via P3153
replaced = False
# we check if the organization is not already in the KB, and aggregate/merge with this existing one if yes
cursor = stagingArea.db.aql.execute(
'FOR doc IN organizations FILTER ['+funderID+'] ANY IN doc["claims"]["P3153"][*]["value"] LIMIT 1 RETURN doc'
)
if cursor.count()>0:
existing_organization = cursor.next()
existing_organization = stagingArea.aggregate_with_merge(existing_organization, organization)
#del existing_software["_rev"]
#print(existing_software)
stagingArea.staging_graph.update_vertex(existing_organization)
organization = existing_organization
replaced = True
if not replaced:
# organization as document vertex collection
local_org = stagingArea.init_entity_from_template("organization", source=source_ref)
if local_org is None:
raise("cannot init organization entity from default template")
organization["labels"] = org_name
local_org_id = stagingArea.get_uid()
organization["_key"] = local_org_id
organization["_id"] = "organizations/" + organization["_key"]
stagingArea.staging_graph.insert_vertex("organizations", organization)
# funding relation
relation = {}
relation["claims"] = {}
relation["claims"]['P8324'] = [ {"references": [ source_ref ] } ]
relation["_from"] = organization["_id"]
relation["_to"] = "documents/" + document["_key"]
relation["_id"] = "funding/" + organization["_key"] + "_" + document["_key"]
stagingArea.staging_graph.insert_edge("funding", edge=relation)
'''
# we process all the annotations from this document, which makes possible some (modest) optimizations
cursor_annot = stagingArea.db.aql.execute(
"FOR doc IN annotations FILTER doc.document.$oid == '" + local_doc['_key'] + "' RETURN doc", ttl=60
)
software_name_processed = {}
index_annot = 0
for annotation in cursor_annot:
# annotations from the same document lead to a set of new software entity (to be further disambiguated)
# software with the same name in the same document are considered as the same entity and what is
# extracted for each annotation is aggregated in this single entity
new_entity = False
if not annotation["software-name"]["normalizedForm"] in software_name_processed:
# new entity
software = stagingArea.init_entity_from_template("software", source=source_ref)
if software is None:
raise("cannot init software entity from default template")
software['labels'] = annotation["software-name"]["normalizedForm"]
new_entity = True
else:
# otherwise get the existing entity for this software
software = software_name_processed[annotation["software-name"]["normalizedForm"]]
# version info (P348)
if "version" in annotation and not check_value_exists(software["claims"], "P348", annotation["version"]):
local_value = {}
local_value["value"] = annotation["version"]["normalizedForm"]
local_value["datatype"] = "string"
local_value["references"] = []
local_value["references"].append(source_ref)
if not "P348" in software["claims"]:
software["claims"]["P348"] = []
software["claims"]["P348"].append(local_value)
changed = True
if "publisher" in annotation and not check_value_exists(software["claims"], "P123", annotation["publisher"]):
# publisher (P123)
local_value = {}
local_value["value"] = annotation["publisher"]["normalizedForm"]
local_value["datatype"] = "string"
local_value["references"] = []
local_value["references"].append(source_ref)
if not "P123" in software["claims"]:
software["claims"]["P123"] = []
software["claims"]["P123"].append(local_value)
changed = True
if "url" in annotation and not check_value_exists(software["claims"], "P854", annotation["url"]):
# reference URL (P854)
local_value = {}
local_value["value"] = annotation["url"]["normalizedForm"]
local_value["value"] = local_value["value"].replace(" ", "")
local_value["datatype"] = "url"
local_value["references"] = []
local_value["references"].append(source_ref)
if not "P854" in software["claims"]:
software["claims"]["P854"] = []
software["claims"]["P854"].append(local_value)
changed = True
# the predicted wikidata entity and Wikipedia english page for the software are represented with property
# "said to be the same" (P460), which is defined as "said to be the same as that item, but it's uncertain or disputed"
if "wikipediaExternalRef" in annotation and not check_value_exists(software["claims"], "P460", annotation["wikipediaExternalRef"]):
# imported from Wikimedia project (P143)
local_value = {}
local_value["value"] = "https://en.wikipedia.org/?curid=" + str(annotation["wikipediaExternalRef"])
local_value["datatype"] = "url"
local_value["references"] = []
local_value["references"].append(source_ref)
if not "P460" in software["claims"]:
software["claims"]["P460"] = []
software["claims"]["P460"].append(local_value)
changed = True
if "wikidataId" in annotation and not check_value_exists(software["claims"], "P460", annotation["wikidataId"]):
local_value = {}
local_value["value"] = annotation["wikidataId"]
local_value["datatype"] = "wikibase-item"
local_value["references"] = []
local_value["references"].append(source_ref)
if not "P460" in software["claims"]:
software["claims"]["P460"] = []
software["claims"]["P460"].append(local_value)
software["index_entity"] = annotation["wikidataId"]
changed = True
# bibliographical references associated to the software could be aggregated here, possibly with count information
# -> to be reviewed
if new_entity:
local_id = stagingArea.get_uid()
software['_key'] = local_id
software['_id'] = "software/" + local_id
stagingArea.staging_graph.insert_vertex("software", software)
software_name_processed[annotation["software-name"]["normalizedForm"]] = software
elif changed:
stagingArea.staging_graph.update_vertex(software)
# relations to be built at this level:
# - citations based on software mention in a document, which will include context sentence, coordinates, etc.
# here document are fully specified (with PDF hash, page coordinates, etc.) because it has been "text-mined"
# - references, which relate a software or a document (where the reference is expressed) to a document
# (and less frequently to a software), the document here can be simply a set of bibliographical metadata or
# a fully specified document
relation = stagingArea.init_entity_from_template("citation", source=source_ref)
if relation is None:
raise("cannot init citation relation from default template")
# store original software name string - always present normally
# we use property P6166 ("quote work", here the work is the mentioned software)
if "software-name" in annotation:
local_value = {}
local_value["value"] = annotation["software-name"]["normalizedForm"]
local_value["datatype"] = "string"
local_value["references"] = []
local_value["references"].append(source_ref)
# bounding box in qualifier
# relevant property is "relative position within image" (P2677) | |
= {}
iteration_num = -1
# iterate over cases from end to start, unless we've grabbed values from
# every system
while not self._has_all_values(coord_map):
iteration = self.system_cases._case_keys[iteration_num]
iteration_num -= 1
split_iter = self._split_coordinate(iteration)
iter_key = ':'.join(split_iter)
# if coord_map[iter_key] is False, we haven't grabbed variable values
# from this system
if not coord_map[iter_key]:
coord_map[iter_key] = True
case = self.system_cases.get_case(iteration)
if get_outputs and case.outputs is None:
continue
if not get_outputs and case.inputs is None:
continue
outputs = case.outputs._values if case.outputs is not None else None
residuals = case.residuals._values if case.residuals is not None else None
inputs = case.inputs._values if case.inputs is not None else None
if get_outputs:
for var_name in outputs.dtype.names:
if var_name not in variables:
variables[var_name] = {'value': outputs[var_name]}
if residuals is not None and var_name in residuals.dtype.names:
variables[var_name]['residuals'] = residuals[var_name]
else:
variables[var_name]['residuals'] = 'Not Recorded'
elif inputs is not None:
for var_name in inputs.dtype.names:
if var_name not in variables:
variables[var_name] = {'value': inputs[var_name]}
return variables
def _has_all_values(self, coord_map):
"""
Tell if all variables from every recorded system have been iterated over.
Parameters
----------
coord_map : dict
maps stripped iteration coordinates to a bool indicating whether or not the system(s)
associated with that iteration coordinate have been iterated over.
Returns
-------
bool
True if coord_map is True for each key, False otherwise.
"""
for coord in coord_map:
if not coord_map[coord]:
return False
return True
def _write_outputs(self, in_or_out, comp_type, outputs, hierarchical, print_arrays,
                   out_stream):
    """
    Write table of variable names, values, residuals, and metadata to out_stream.

    The output values could actually represent input variables; in this context,
    "outputs" refers to the data that is being logged to an output stream.

    Parameters
    ----------
    in_or_out : str, 'input' or 'output'
        Indicates whether the values passed in are from inputs or output variables.
    comp_type : str, 'Explicit' or 'Implicit'
        The type of component with the output values.
    outputs : list
        List of (name, dict of vals and metadata) tuples.
    hierarchical : bool
        When True, human readable output shows variables in hierarchical format.
    print_arrays : bool
        When False, in the columnar display, just display norm of any ndarrays
        with size > 1 (the norm is surrounded by vertical bars). When True, also
        display full values of the ndarray below the row; format is affected by
        the values set with numpy.set_printoptions. Default is False.
    out_stream : file-like object
        Where to send human readable output. Set to None to suppress.
    """
    if out_stream is None:
        return

    # Re-pack the (name, metadata) tuples into an ordered mapping, which is
    # easier to work with here. (An unused read of self._abs2meta was removed.)
    dict_of_outputs = OrderedDict(outputs)

    # Both directions point at the same absolute-name view for this call.
    allprocs_abs_names = {
        'input': dict_of_outputs.keys(),
        'output': dict_of_outputs.keys()
    }

    write_outputs(in_or_out, comp_type, dict_of_outputs, hierarchical, print_arrays,
                  out_stream, 'model', allprocs_abs_names)
class DriverCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a Driver iteration.

    Attributes
    ----------
    _var_settings : dict
        Dictionary mapping absolute variable names to variable settings.
    """

    def __init__(self, filename, format_version, abs2prom, abs2meta, prom2abs, var_settings):
        """
        Initialize.

        Parameters
        ----------
        filename : str
            The name of the recording file from which to instantiate the case reader.
        format_version : int
            The version of the format assumed when loading the file.
        abs2prom : {'input': dict, 'output': dict}
            Dictionary mapping absolute names to promoted names.
        abs2meta : dict
            Dictionary mapping absolute variable names to variable metadata.
        prom2abs : {'input': dict, 'output': dict}
            Dictionary mapping promoted names to absolute names.
        var_settings : dict
            Dictionary mapping absolute variable names to variable settings.
        """
        super(DriverCases, self).__init__(filename, format_version, abs2prom, abs2meta,
                                          prom2abs)
        self._var_settings = var_settings

    def _extract_case_from_row(self, row):
        """
        Pull data out of a queried SQLite row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, inputs, outputs)
            Queried SQLite driver table row.

        Returns
        -------
        DriverCase
            Case for associated row.

        Raises
        ------
        ValueError
            If the recording's format version is not supported.
        """
        idx, counter, iteration_coordinate, timestamp, success, msg, inputs_text, \
            outputs_text, = row

        # Version 3+ stores variable data as JSON; versions 1 and 2 used blobs.
        if self.format_version >= 3:
            inputs_array = json_to_np_array(inputs_text)
            outputs_array = json_to_np_array(outputs_text)
        elif self.format_version in (1, 2):
            inputs_array = blob_to_array(inputs_text)
            outputs_array = blob_to_array(outputs_text)
        else:
            # Previously an unsupported version fell through and crashed below
            # with a confusing NameError; fail fast with a clear message instead.
            raise ValueError('Unsupported format version: {}'.format(self.format_version))

        case = DriverCase(self.filename, counter, iteration_coordinate, timestamp,
                          success, msg, inputs_array, outputs_array,
                          self._prom2abs, self._abs2prom, self._abs2meta,
                          self._var_settings)
        return case

    def load_cases(self):
        """
        Load all driver cases into memory.
        """
        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM driver_iterations")
            rows = cur.fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                self._cases[case.iteration_coordinate] = case

    def get_case(self, case_id, scaled=False):
        """
        Get a case from the database, caching the unscaled case on first access.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.
        scaled : bool
            If True, return variables scaled. Otherwise, return physical values.

        Returns
        -------
        DriverCase
            An instance of a Driver Case populated with data from the
            specified case/iteration.
        """
        iteration_coordinate = self.get_iteration_coordinate(case_id)

        # check to see if we've already cached this case
        if iteration_coordinate in self._cases:
            case = self._cases[iteration_coordinate]
        else:
            # Fetch the unscaled case since it does not already exist in _cases.
            with sqlite3.connect(self.filename) as con:
                cur = con.cursor()
                cur.execute("SELECT * FROM driver_iterations WHERE "
                            "iteration_coordinate=:iteration_coordinate",
                            {"iteration_coordinate": iteration_coordinate})
                row = cur.fetchone()
            # The connection context manager commits but does not close.
            con.close()

            case = self._extract_case_from_row(row)

            # save (unscaled) so we don't query again
            self._cases[case.iteration_coordinate] = case

        if scaled:
            # Scale a copy, otherwise we would modify the cached unscaled object.
            case = deepcopy(case)
            case.scale()

        return case
class DriverDerivativeCases(BaseCases):
    """
    Case specific to the entries that might be recorded in a Driver
    derivatives computation.
    """

    def _extract_case_from_row(self, row):
        """
        Build a DriverDerivativesCase from one driver_derivatives table row.

        Parameters
        ----------
        row : (id, counter, iter_coordinate, timestamp, success, msg, totals)
            Queried SQLite driver derivatives table row.

        Returns
        -------
        DriverDerivativesCase
            Case for associated row.
        """
        idx, counter, coord, timestamp, success, msg, totals_blob = row

        totals = blob_to_array(totals_blob)
        return DriverDerivativesCase(self.filename, counter, coord,
                                     timestamp, success, msg, totals,
                                     self._prom2abs, self._abs2prom, self._abs2meta)

    def load_cases(self):
        """
        Load every driver derivatives case into the in-memory cache.
        """
        with sqlite3.connect(self.filename) as con:
            rows = con.cursor().execute("SELECT * FROM driver_derivatives").fetchall()
            for row in rows:
                case = self._extract_case_from_row(row)
                self._cases[case.iteration_coordinate] = case

    def get_case(self, case_id):
        """
        Get a case from the database, serving from the cache when possible.

        Parameters
        ----------
        case_id : int or str
            The integer index or string-identifier of the case to be retrieved.

        Returns
        -------
        DriverDerivativesCase
            An instance populated with data from the specified case/iteration.
        """
        coord = self.get_iteration_coordinate(case_id)

        # already cached? then no database round-trip is needed
        if coord in self._cases:
            return self._cases[coord]

        with sqlite3.connect(self.filename) as con:
            cur = con.cursor()
            cur.execute("SELECT * FROM driver_derivatives WHERE "
                        "iteration_coordinate=:iteration_coordinate",
                        {"iteration_coordinate": coord})
            row = cur.fetchone()
        con.close()

        case = self._extract_case_from_row(row)

        # cache so we don't query again
        self._cases[case.iteration_coordinate] = case
        return case
class ProblemCases(BaseCases):
"""
Case specific to the entries that might be recorded in a Driver iteration.
"""
def _extract_case_from_row(self, row):
    """
    Pull data out of a queried SQLite row.

    Parameters
    ----------
    row : (id, counter, iter_coordinate, timestamp, success, msg, outputs)
        Queried SQLite problems table row.

    Returns
    -------
    ProblemCase
        Case for associated row.

    Raises
    ------
    ValueError
        If the recording's format version is not supported.
    """
    idx, counter, case_name, timestamp, success, msg, \
        outputs_text, = row

    # Version 3+ stores variable data as JSON; versions 1 and 2 used blobs.
    if self.format_version >= 3:
        outputs_array = json_to_np_array(outputs_text)
    elif self.format_version in (1, 2):
        outputs_array = blob_to_array(outputs_text)
    else:
        # Previously an unsupported version fell through and crashed below
        # with a confusing NameError; fail fast with a clear message instead.
        raise ValueError('Unsupported format version: {}'.format(self.format_version))

    case = ProblemCase(self.filename, counter, case_name, timestamp,
                       success, msg, outputs_array, self._prom2abs,
                       self._abs2prom, self._abs2meta)
    return case
def load_cases(self):
    """
    Load every case from the problem_cases table into the in-memory cache.
    """
    with sqlite3.connect(self.filename) as con:
        rows = con.cursor().execute("SELECT * FROM problem_cases").fetchall()
        # NOTE(review): cases are keyed by case.iteration_coordinate here but
        # looked up by case_name in get_case — presumably ProblemCase exposes
        # the case name via iteration_coordinate; verify against ProblemCase.
        for row in rows:
            case = self._extract_case_from_row(row)
            self._cases[case.iteration_coordinate] = case
def get_case(self, case_name):
"""
Get a case from the database.
Parameters
----------
case_name : str
The string-identifier of the case to be retrieved.
Returns
-------
An instance of a Driver Case populated with data from the
specified case/iteration.
"""
# check to see if we've already cached this case
if case_name in self._cases:
return self._cases[case_name]
with sqlite3.connect(self.filename) as con:
cur = con.cursor()
cur.execute("SELECT | |
#coding = utf-8
import json
class MatchDto:
    """
    Subset of a match payload: game identifiers, participant identities,
    team stats and participant details. Missing keys become None / empty lists.
    """

    def __init__(self, jsonData):
        self.queueId = jsonData.get("queueId")
        self.gameId = jsonData.get("gameId")
        self.participantIdentities = [
            ParticipantIdentityDto(entry)
            for entry in jsonData.get("participantIdentities", [])
        ]
        self.teams = [TeamStatsDto(entry) for entry in jsonData.get("teams", [])]
        self.participants = [ParticipantDto(entry) for entry in jsonData.get("participants", [])]
        self.gameDuration = jsonData.get("gameDuration")
        self.gameCreation = jsonData.get("gameCreation")

    def processMatchData(self, targetAccountId):
        """
        Keep only the identity/participant entries belonging to
        targetAccountId; every other list slot is replaced with None in place.
        """
        participantId = 0
        for idx, identity in enumerate(self.participantIdentities):
            if identity.player.accountId == targetAccountId:
                participantId = identity.participantId
            else:
                self.participantIdentities[idx] = None
        for idx, participant in enumerate(self.participants):
            if participant.participantId != participantId:
                self.participants[idx] = None
class ParticipantIdentityDto:
    """Links a match participantId to the player's account identity."""

    def __init__(self, jsonData):
        # build the nested PlayerDto only when the key is present
        self.player = PlayerDto(jsonData["player"]) if "player" in jsonData else None
        self.participantId = jsonData.get("participantId")
class PlayerDto:
    """Player identity fields (names and account/summoner ids); missing keys become None."""

    def __init__(self, jsonData):
        self.summonerName = jsonData.get("summonerName")
        self.currentAccountId = jsonData.get("currentAccountId")
        self.summonerId = jsonData.get("summonerId")
        self.accountId = jsonData.get("accountId")
class TeamStatsDto:
    """Team-level match statistics: objective kills, first-X flags, bans, win/loss."""

    # scalar fields copied verbatim from the payload (missing -> None)
    _SCALAR_KEYS = (
        "firstDragon", "firstInhibitor", "baronKills", "firstRiftHerald",
        "firstBaron", "riftHeraldKills", "firstBlood", "teamId", "firstTower",
        "vilemawKills", "inhibitorKills", "towerKills", "dominionVictoryScore",
        "win", "dragonKills",
    )

    def __init__(self, jsonData):
        for key in self._SCALAR_KEYS:
            setattr(self, key, jsonData.get(key))
        # nested ban entries
        self.bans = [TeamBansDto(ban) for ban in jsonData.get("bans", [])]
class TeamBansDto:
    """A single champion ban: which champion, and on which pick turn."""

    def __init__(self, jsonData):
        self.pickTurn = jsonData.get("pickTurn")
        self.championId = jsonData.get("championId")
class ParticipantDto:
    """
    Per-participant match record: champion, team, summoner spells, runes,
    masteries and the nested stats/timeline blocks. Missing keys become
    None / empty lists.
    """

    def __init__(self, jsonData):
        self.stats = ParticipantStatsDto(jsonData["stats"]) if "stats" in jsonData else None
        # BUG FIX: this previously tested for the "stats" key instead of
        # "participantId", which dropped participantId whenever "stats" was
        # absent, and raised KeyError when "stats" was present without
        # "participantId".
        self.participantId = jsonData["participantId"] if "participantId" in jsonData else None
        self.runes = []
        if "runes" in jsonData:
            for item in jsonData["runes"]:
                self.runes.append(RuneDto(item))
        self.timeline = ParticipantTimelineDto(jsonData["timeline"]) if "timeline" in jsonData else None
        self.teamId = jsonData["teamId"] if "teamId" in jsonData else None
        self.spell2Id = jsonData["spell2Id"] if "spell2Id" in jsonData else None
        self.masteries = []
        if "masteries" in jsonData:
            for item in jsonData["masteries"]:
                self.masteries.append(MasteryDto(item))
        self.highestAchievedSeasonTier = jsonData["highestAchievedSeasonTier"] if "highestAchievedSeasonTier" in jsonData else None
        self.spell1Id = jsonData["spell1Id"] if "spell1Id" in jsonData else None
        self.championId = jsonData["championId"] if "championId" in jsonData else None
class ParticipantStatsDto:
def __init__(self, jsonData):
#self.jsonData = jsonData
self.firstBloodAssist = jsonData["firstBloodAssist"] if "firstBloodAssist" in jsonData else None
self.visionScore = jsonData["visionScore"] if "visionScore" in jsonData else None
self.magicDamageDealtToChampions = jsonData["magicDamageDealtToChampions"] if "magicDamageDealtToChampions" in jsonData else None
self.damageDealtToObjectives = jsonData["damageDealtToObjectives"] if "damageDealtToObjectives" in jsonData else None
self.totalTimeCrowdControlDealt = jsonData["totalTimeCrowdControlDealt"] if "totalTimeCrowdControlDealt" in jsonData else None
self.longestTimeSpentLiving = jsonData["longestTimeSpentLiving"] if "longestTimeSpentLiving" in jsonData else None
# self.perk1Var1 = jsonData["perk1Var1"] if "perk1Var1" in jsonData else None
# self.perk1Var3 = jsonData["perk1Var3"] if "perk1Var3" in jsonData else None
# self.perk1Var2 = jsonData["perk1Var2"] if "perk1Var2" in jsonData else None
self.tripleKills = jsonData["perk1Var2"] if "perk1Var2" in jsonData else None
# self.perk3Var3 = jsonData["perk3Var3"] if "perk3Var3" in jsonData else None
self.nodeNeutralizeAssist = jsonData["nodeNeutralizeAssist"] if "nodeNeutralizeAssist" in jsonData else None
# self.perk3Var2 = jsonData["perk3Var2"] if "perk3Var2" in jsonData else None
# self.playerScore9 = jsonData["playerScore9"] if "playerScore9" in jsonData else None
# self.playerScore8 = jsonData["playerScore8"] if "playerScore8" in jsonData else None
self.kills = jsonData["kills"] if "kills" in jsonData else None
# self.playerScore1 = jsonData["playerScore1"] if "playerScore1" in jsonData else None
# self.playerScore0 = jsonData["playerScore0"] if "playerScore0" in jsonData else None
# self.playerScore3 = jsonData["playerScore3"] if "playerScore3" in jsonData else None
# self.playerScore2 = jsonData["playerScore2"] if "playerScore2" in jsonData else None
# self.playerScore5 = jsonData["playerScore5"] if "playerScore5" in jsonData else None
# self.playerScore4 = jsonData["playerScore4"] if "playerScore4" in jsonData else None
# self.playerScore7 = jsonData["playerScore7"] if "playerScore7" in jsonData else None
# self.playerScore6 = jsonData["playerScore6"] if "playerScore6" in jsonData else None
# self.perk5Var1 = jsonData["perk5Var1"] if "perk5Var1" in jsonData else None
# self.perk5Var3 = jsonData["perk5Var3"] if "perk5Var3" in jsonData else None
# self.perk5Var2 = jsonData["perk5Var2"] if "perk5Var2" in jsonData else None
self.totalScoreRank = jsonData["totalScoreRank"] if "totalScoreRank" in jsonData else None
self.neutralMinionsKilled = jsonData["neutralMinionsKilled"] if "neutralMinionsKilled" in jsonData else None
self.damageDealtToTurrets = jsonData["damageDealtToTurrets"] if "damageDealtToTurrets" in jsonData else None
self.physicalDamageDealtToChampions = jsonData["physicalDamageDealtToChampions"] if "physicalDamageDealtToChampions" in jsonData else None
self.nodeCapture = jsonData["nodeCapture"] if "nodeCapture" in jsonData else None
self.largestMultiKill = jsonData["largestMultiKill"] if "largestMultiKill" in jsonData else None
# self.perk2Var2 = jsonData["perk2Var2"] if "perk2Var2" in jsonData else None
# self.perk2Var3 = jsonData["perk2Var3"] if "perk2Var3" in jsonData else None
self.totalUnitsHealed = jsonData["totalUnitsHealed"] if "totalUnitsHealed" in jsonData else None
# self.perk2Var1 = jsonData["perk2Var1"] if "perk2Var1" in jsonData else None
# self.perk4Var1 = jsonData["perk4Var1"] if "perk4Var1" in jsonData else None
# self.perk4Var2 = jsonData["perk4Var2"] if "perk4Var2" in jsonData else None
# self.perk4Var3 = jsonData["perk4Var3"] if "perk4Var3" in jsonData else None
self.wardsKilled = jsonData["wardsKilled"] if "wardsKilled" in jsonData else None
self.largestCriticalStrike = jsonData["largestCriticalStrike"] if "largestCriticalStrike" in jsonData else None
self.largestKillingSpree = jsonData["largestKillingSpree"] if "largestKillingSpree" in jsonData else None
self.quadraKills = jsonData["quadraKills"] if "quadraKills" in jsonData else None
self.teamObjective = jsonData["teamObjective"] if "teamObjective" in jsonData else None
self.magicDamageDealt = jsonData["magicDamageDealt"] if "magicDamageDealt" in jsonData else None
self.item2 = jsonData["item2"] if "item2" in jsonData else None
self.item3 = jsonData["item3"] if "item3" in jsonData else None
self.item0 = jsonData["item0"] if "item0" in jsonData else None
self.neutralMinionsKilledTeamJungle = jsonData["neutralMinionsKilledTeamJungle"] if "neutralMinionsKilledTeamJungle" in jsonData else None
self.item6 = jsonData["item6"] if "item6" in jsonData else None
self.item4 = jsonData["item4"] if "item4" in jsonData else None
self.item5 = jsonData["item5"] if "item5" in jsonData else None
self.perk1 = jsonData["perk1"] if "perk1" in jsonData else None
self.perk0 = jsonData["perk0"] if "perk0" in jsonData else None
self.perk3 = jsonData["perk3"] if "perk3" in jsonData else None
self.perk2 = jsonData["perk2"] if "perk2" in jsonData else None
self.perk5 = jsonData["perk5"] if "perk5" in jsonData else None
self.perk4 = jsonData["perk4"] if "perk4" in jsonData else None
self.damageSelfMitigated = jsonData["damageSelfMitigated"] if "damageSelfMitigated" in jsonData else None
self.magicalDamageTaken = jsonData["magicalDamageTaken"] if "magicalDamageTaken" in jsonData else None
self.firstInhibitorKill = jsonData["firstInhibitorKill"] if "firstInhibitorKill" in jsonData else None
self.trueDamageTaken = jsonData["trueDamageTaken"] if "trueDamageTaken" in jsonData else None
self.nodeNeutralize = jsonData["nodeNeutralize"] if "nodeNeutralize" in jsonData else None
self.assists = jsonData["assists"] if "assists" in jsonData else None
self.combatPlayerScore = jsonData["combatPlayerScore"] if "combatPlayerScore" in jsonData else None
self.perkPrimaryStyle = jsonData["perkPrimaryStyle"] if "perkPrimaryStyle" in jsonData else None
self.goldSpent = jsonData["goldSpent"] if "goldSpent" in jsonData else None
self.trueDamageDealt = jsonData["trueDamageDealt"] if "trueDamageDealt" in jsonData else None
self.participantId = jsonData["participantId"] if "participantId" in jsonData else None
self.totalDamageTaken = jsonData["totalDamageTaken"] if "totalDamageTaken" in jsonData else None
self.physicalDamageDealt = jsonData["physicalDamageDealt"] if "physicalDamageDealt" in jsonData else None
self.sightWardsBoughtInGame = jsonData["sightWardsBoughtInGame"] if "sightWardsBoughtInGame" in jsonData else None
self.totalDamageDealtToChampions = jsonData["totalDamageDealtToChampions"] if "totalDamageDealtToChampions" in jsonData else None
self.physicalDamageTaken = jsonData["physicalDamageTaken"] if "physicalDamageTaken" in jsonData else None
| |
# -*- coding: utf-8 -*-
import os
import re
import copy
import time
import numpy
import pandas
import shutil
import subprocess
import multiprocessing
from scipy import stats
from ruamel import yaml
# from safirpy.safir_problem_definition import file_0 as spd_version_0
def preprocess_structured_directories(path_work_dir, list_dir_structure):
    """
    Build the cartesian product of zero-padded sub-directory names.

    :param path_work_dir: root directory under which the tree is laid out.
    :param list_dir_structure: sequence of ints; element k is the number of
        directories at depth k (the last element is the deepest level).
    :return: list of paths rooted at path_work_dir, one per leaf directory.

    EXAMPLE:
    >>> list_dir_structure = (1, 3, 2)
    >>> print(preprocess_structured_directories('root', list_dir_structure))
    ['root/0/0/0', 'root/0/0/1', 'root/0/1/0', 'root/0/1/1', 'root/0/2/0', 'root/0/2/1']
    """
    # =================================================================
    # Create a list of directory names and populate with defined number
    # =================================================================
    # Deepest level first; each level's count n becomes ['0', '1', ...] padded
    # to the width of str(n).
    names_per_level = []
    for count in reversed(list_dir_structure):
        width = len(str(int(count)))
        names_per_level.append(['{:0{len}d}'.format(j, len=width) for j in range(count)])

    # BUG FIX: a second loop over the (already exhausted) reversed() iterator
    # used to follow here. It was always a silent no-op — and would have
    # doubled every level had the iterator not been consumed — so it was
    # removed outright.

    if not names_per_level:
        return []

    # ================================
    # Create a list of path (relative)
    # ================================
    # Fold levels together: each later (shallower) level is prefixed onto the
    # partial paths accumulated so far.
    rel_paths = names_per_level[0]
    for level_names in names_per_level[1:]:
        rel_paths = [os.path.join(parent, child)
                     for parent in level_names
                     for child in rel_paths]

    # ========================
    # Convert to absolute path
    # ========================
    return [os.path.join(path_work_dir, rel) for rel in rel_paths]
def preprocess_distribute_files(path_files, path_destination_directories):
    """
    Copy every file in path_files into every directory in
    path_destination_directories. Invalid file or directory paths are
    reported to stdout and skipped.
    """
    for src in path_files:
        if not os.path.isfile(src):
            print('ERROR: [{}] IS NOT A FILE PATH'.format(src))
            continue
        for dest_dir in path_destination_directories:
            if os.path.isdir(dest_dir):
                shutil.copy(src, dest_dir)
            else:
                print('ERROR: [{}] IS NOT A DIRECTORY PATH'.format(dest_dir))
def preprocess_mc_parameters(n_rv, dict_safir_file_param, index_column='index'):
    """
    Build a DataFrame of SAFIR input parameters, sampling distributed ones.

    Each item of dict_safir_file_param is either a distribution definition
    (a dict, see preprocess_safir_mc_parameters), a list of per-realisation
    values of length n_rv, or a static value replicated n_rv times.

    :param n_rv: int, number of random samples for distributed parameters.
    :param dict_safir_file_param: dict, SAFIR input (problem definition) file
        parameterised variable names mapped to their value/definition.
    :param index_column: str, name of the index column of the returned frame.
    :return: pandas.DataFrame with n_rv rows, one column per parameter.
    """
    # split the parameters into static values and distributed definitions
    static_params = dict()
    distributed_params = dict()

    for name, value in dict_safir_file_param.items():
        if isinstance(value, dict):
            # a distribution definition to be sampled
            distributed_params[name] = value
        elif isinstance(value, list) and len(value) == n_rv:
            # already one value per realisation
            distributed_params[name] = value
        else:
            # static value: replicate for every realisation
            static_params[name] = [value] * n_rv

    # sample the distributed random parameters
    distributed_params = preprocess_safir_mc_parameters(n_rv, distributed_params)

    # merge static and distributed parameters into one table
    merged = {**static_params, **distributed_params}
    if index_column not in merged:
        merged[index_column] = list(range(n_rv))

    pf_params = pandas.DataFrame(merged)
    pf_params.set_index(index_column, inplace=True)
    return pf_params
def preprocess_safir_mc_parameters(n_rv, dict_distribution_params):
    """
    Sample n_rv values for each distributed random parameter.

    :param n_rv: int, number of random variables to be sampled from each distribution.
    :param dict_distribution_params: dict describing distributed random parameters:
        {
            'v_1': {'dist_name': 'name_of_dist', 'ubound': 0, 'lbound': 1,
                    'loc': 0, 'scale': 1, 'kwargs': dict()},
            ...
        }
        'dist_name' must name a distribution in scipy.stats (e.g. 'norm',
        'gumbel_r', 'lognorm'); 'kwargs' is optional and is forwarded to the
        distribution's cdf/ppf (e.g. shape parameters).
    :return: dict mapping each parameter name to a numpy array of n_rv sampled
        values, shuffled in place so realisations are not ordered by magnitude.

    :raises ValueError: if 'dist_name' is not an attribute of scipy.stats.
    """
    dict_sampled_random_values = dict()

    for each_variable_name, val in dict_distribution_params.items():
        # Resolve the distribution by name directly from scipy.stats.
        # (Previously a large hand-written name->object dict was rebuilt on
        # every call; it also referenced stats.frechet_r / stats.frechet_l,
        # which were removed in SciPy 1.6, so merely calling this function
        # raised AttributeError on modern SciPy versions.)
        try:
            dist = getattr(stats, val['dist_name'])
        except AttributeError:
            raise ValueError('Unknown scipy.stats distribution: {}'.format(val['dist_name']))

        lbound, ubound = val['lbound'], val['ubound']
        loc, scale = val['loc'], val['scale']
        # Additional shape parameters defined in kwargs, if any.
        kwargs = val.get('kwargs', dict())

        # Generate a linearly spaced array within lower and upper boundary of
        # the cumulative probability density.
        sampled_cfd = numpy.linspace(
            dist.cdf(x=lbound, loc=loc, scale=scale, **kwargs),
            dist.cdf(x=ubound, loc=loc, scale=scale, **kwargs),
            n_rv
        )

        # Inverse-CDF sampling: maps the quantiles back into [lbound, ubound].
        sampled_random_values = dist.ppf(
            q=sampled_cfd,
            loc=loc,
            scale=scale,
            **kwargs
        )

        # Shuffle in place so realisations are not ordered by magnitude.
        numpy.random.shuffle(sampled_random_values)
        dict_sampled_random_values[each_variable_name] = sampled_random_values

    return dict_sampled_random_values
def preprocess_mc_parameters_host(path_safir_mc_param_csv=None, dict_safir_params=None, n_rv=None):
    """
    Obtain the Monte Carlo parameter table, either from a CSV file or by
    sampling the given parameter definitions.

    :param path_safir_mc_param_csv: optional path to a CSV with an 'index'
        column; when given, parameters are read from it instead of sampled.
    :param dict_safir_params: parameter definitions passed to
        preprocess_mc_parameters when no CSV is supplied.
    :param n_rv: number of realisations when sampling.
    :return: pandas.DataFrame of parameters, one row per realisation.
    """
    if path_safir_mc_param_csv is None:
        # no file given: sample, then randomise the row order and renumber
        df_safir_mc_param = preprocess_mc_parameters(n_rv=n_rv, dict_safir_file_param=dict_safir_params)
        df_safir_mc_param = df_safir_mc_param.sample(frac=1).reset_index(drop=True)
        return df_safir_mc_param

    csv_path = os.path.realpath(path_safir_mc_param_csv)
    if not os.path.isfile(csv_path):
        raise ValueError('ERROR! Files does not exist, check input file: {}'.format(csv_path))
    return pandas.read_csv(csv_path).set_index('index')
def safir_mc_mp(
        list_kwargs,
        calc_worker,
        n_proc=1,
        mp_maxtasksperchild=1000,
        progress_print_sleep=2
):
    """
    Run calc_worker over every kwargs dict in list_kwargs on a process pool,
    printing a 60-character textual progress bar until all jobs finish.

    Parameters
    ----------
    list_kwargs : list of dict
        One kwargs dict per simulation; each is passed to calc_worker together
        with a shared queue as a (kwargs, queue) tuple.
    calc_worker : callable
        Worker taking a single (kwargs, queue) tuple. It is expected to put one
        item on the queue per completed simulation (as
        safir_seek_convergence_worker does), which is what drives the bar.
    n_proc : int
        Number of worker processes in the pool.
    mp_maxtasksperchild : int
        Tasks a child process runs before being replaced.
    progress_print_sleep : int or float
        Seconds between progress-bar refreshes. If falsy (0/None), the progress
        loop is skipped entirely and the call simply blocks until completion
        with no output.

    Returns
    -------
    list
        calc_worker's results, one per entry of list_kwargs, in input order.
    """
    time_simulation_start = time.perf_counter()

    # Manager queue shared with the workers: qsize() counts completed jobs.
    m = multiprocessing.Manager()
    mp_q = m.Queue()

    p = multiprocessing.Pool(n_proc, maxtasksperchild=mp_maxtasksperchild)
    jobs = p.map_async(calc_worker, [(kwargs, mp_q) for kwargs in list_kwargs])

    n_simulations = len(list_kwargs)
    n_steps = 60  # length of the progress bar

    while progress_print_sleep:
        # Scale elapsed time into a readable unit (s/m/h/d) for display.
        time_consumed = time.perf_counter() - time_simulation_start
        if time_consumed > 60*60*24:
            time_consumed /= 60*60*24
            str_fmt = "| {}>{} |{:03.1f}% {:05.1f}d"
        elif time_consumed > 60*60:
            time_consumed /= 60*60
            str_fmt = "| {}>{} |{:03.1f}% {:05.1f}h"
        elif time_consumed > 60:
            time_consumed /= 60
            str_fmt = "| {}>{} |{:03.1f}% {:05.1f}m"
        else:
            str_fmt = "| {}>{} |{:03.1f}% {:05.1f}s"

        if jobs.ready():
            # All jobs done: print the full bar on its own line and stop polling.
            print(str_fmt.format('=' * round(n_steps), '-' * 0, 100, time_consumed))
            break
        else:
            # Completed fraction scaled to bar width; '\r' redraws in place.
            p_ = mp_q.qsize() / n_simulations * n_steps
            print(str_fmt.format('=' * int(p_), '-' * int(n_steps - p_), p_/n_steps * 100, time_consumed), end='\r')
            time.sleep(progress_print_sleep)

    p.close()
    p.join()

    return jobs.get()
def safir_problem_definition_protobuf(str_parameterised_problem_definition, dict_safir_params):
    """
    Fill a parameterised SAFIR problem-definition template.

    Numeric values (int/float) are rendered in scientific notation with three
    decimal places; strings are inserted as-is. Values of any other type are
    silently dropped, so a template placeholder that refers to one will raise
    KeyError from str.format.

    :param str_parameterised_problem_definition: template with {name} fields.
    :param dict_safir_params: mapping of field name to value.
    :return: the template with every placeholder substituted.
    """
    formatted = dict()
    for name, value in dict_safir_params.items():
        if isinstance(value, (int, float)):
            formatted[name] = '{:.3e}'.format(value)
        elif isinstance(value, str):
            formatted[name] = '{}'.format(value)

    return str_parameterised_problem_definition.format(**formatted)
def safir_process(path_problem_definition, path_safir_exe, timeout_subprocess=3600):
    """Run the SAFIR solver for one or more problem-definition (``*.in``) files.

    For each input file: change the working directory to the file's directory (SAFIR
    writes its outputs relative to cwd), strip the file extension (SAFIR expects the
    name without ``.in``), run the solver, and save captured stdout to
    ``<name>.stdout`` next to the input file.

    :param path_problem_definition: a path string, or a list of path strings.
    :param path_safir_exe: path to the SAFIR executable.
    :param timeout_subprocess: seconds before a run is terminated.
    """
    # ===============================================
    # Make path_problem_definition a list, not string
    # ===============================================
    if isinstance(path_problem_definition, str):
        path_problem_definition = [path_problem_definition]
    # ============================================
    # Iterate and run all problem definition files
    # ============================================
    for each_path_problem_definition in path_problem_definition:
        # change the current working directory to the input file directory
        os.chdir(os.path.dirname(each_path_problem_definition))
        # strip only the final extension: the previous `split('.')[0]` truncated any
        # path containing a '.' elsewhere (e.g. '/run.1/case.in' -> '/run')
        each_path_problem_definition = os.path.splitext(each_path_problem_definition)[0]
        # make process input arguments, for SAFIR
        args = [path_safir_exe, each_path_problem_definition]
        # run SAFIR, merging stderr into stdout
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # error handling in case of timeout; cache standard output
        try:
            o, e = p.communicate(timeout=timeout_subprocess)
        except subprocess.TimeoutExpired:
            o = b'ERROR: SUBPROCESS TIMEOUT EXPIRED'
            p.terminate()
        # save cached stdout bytes to a *.stdout file, * being the problem name without suffix
        with open('{}.stdout'.format(os.path.basename(each_path_problem_definition)), 'w') as f_stdout:
            f_stdout.write(o.decode('utf-8'))
def safir_seek_convergence_worker(arg):
    """Pool worker: unpack ``(kwargs, queue)``, run one convergence seek, signal completion."""
    kwargs, queue = arg
    outcome = safir_seek_convergence(**kwargs)
    # push a token so the parent process can count finished tasks for its progress bar
    queue.put('0')
    return outcome
def safir_seek_convergence(
path_work_directory,
path_safir_exe,
dict_safir_in_files_strings,
dict_safir_params,
seek_time_convergence_target,
seek_load_lbound,
seek_load_ubound,
seek_load_sign,
seek_time_convergence_target_tol=None,
seek_delta_load_target=None,
seek_iteration_max=20,
):
# =======================================================
# Initial conditions and intermediate variable definition
# =======================================================
# Initial condition
seek_status_converge_on_time = False
seek_status_converge_on_delta_load = False
seek_status_max_iteration_exceeded = False
n_iteration = 0
time_convergence = numpy.nan
try:
os.makedirs(path_work_directory)
except FileExistsError:
pass
# calculate time convergence target range, (lower, upper)
try:
seek_time_convergence_target_range = [seek_time_convergence_target + seek_time_convergence_target_tol,
seek_time_convergence_target - seek_time_convergence_target_tol]
seek_time_convergence_target_range = (min(*seek_time_convergence_target_range),
max(*seek_time_convergence_target_range))
except TypeError:
seek_time_convergence_target_range = [seek_time_convergence_target] * 2
# validate seek goal condition
if seek_time_convergence_target_tol is None and seek_delta_load_target is None:
return -1
# data containers
list_load = [numpy.nan, numpy.nan, numpy.nan]
list_time = []
# ===============
# Seeking process
# ===============
while True:
if time_convergence < seek_time_convergence_target_range[0]:
# structure is too weak, make load less by decreasing upper limit seek_load_ubound
seek_load_ubound = (seek_load_ubound + seek_load_lbound) / 2
elif time_convergence > seek_time_convergence_target_range[1]:
# structure is too strong, make load more by increasing lower limit seek_load_lbound
seek_load_lbound = (seek_load_ubound + seek_load_lbound) / 2
# -------------------
# Prepare SAFIR files
# -------------------
path_target_problem_definition = None
| |
<gh_stars>1-10
import sys
sys.path.append('/opt/conda/lib/python3.7/site-packages')
bd2 = '/home/jupyter/'
from whacc import utils, image_tools, transfer_learning, analysis
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow import keras
import tensorflow as tf
from sklearn.utils import class_weight
import time
from pathlib import Path
import os
import copy
import numpy as np
from tensorflow.keras import applications
from pathlib import Path
import shutil
import zipfile
from datetime import datetime
import pytz
import json
# In[16]:
def build_model(info_dict, labels, model_name_str, base_learning_rate=0.00001, dropout_val=None, class_numbers=None,
                IMG_SIZE=96):
    """Build and compile a transfer-learning classifier on a keras.applications base model.

    :param info_dict: run-metadata dict; build configuration is recorded into it.
    :param labels: label array used to derive classes and balanced class weights.
    :param model_name_str: name of a constructor in ``keras.applications`` (e.g. 'ResNet50V2').
    :param base_learning_rate: optimizer learning rate.
    :param dropout_val: dropout fraction after the dense head, or None for no dropout.
    :param class_numbers: explicit class labels; defaults to ``np.unique(labels)``.
    :param IMG_SIZE: square input image size (3 channels assumed).
    :return: (compiled model, class_weights dict, updated info_dict)
    """
    if class_numbers is None:
        class_numbers = np.unique(labels)
    num_classes = len(class_numbers)
    IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
    # look the base-model constructor up by name instead of eval() on a built string
    model_function = getattr(applications, model_name_str)
    base_model = model_function(input_shape=IMG_SHAPE, weights='imagenet', include_top=False)
    base_model.trainable = True
    x = base_model.output
    x = keras.layers.GlobalAveragePooling2D()(x)  # global spatial average pooling layer
    x = Dense(2048, activation='relu')(x)  # fully-connected layer
    if dropout_val is not None:
        x = Dropout(dropout_val)(x)
    # TODO: give the added head layers explicit names
    if num_classes == 2:
        predictions = Dense(1, activation='sigmoid')(x)  # binary output layer
    else:
        predictions = Dense(num_classes, activation='softmax')(x)  # multi-class output layer
    model = Model(inputs=base_model.input, outputs=predictions)
    if num_classes == 2:
        optimizer = keras.optimizers.RMSprop(learning_rate=base_learning_rate)
        loss = keras.losses.BinaryCrossentropy()
        metrics = [keras.metrics.BinaryAccuracy(name="bool_acc", threshold=0.5),
                   keras.metrics.AUC(name='auc')]
    else:
        optimizer = keras.optimizers.Adam(learning_rate=base_learning_rate)
        loss = keras.losses.SparseCategoricalCrossentropy()
        metrics = [keras.metrics.SparseCategoricalAccuracy(name='acc')]
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    rebalance = class_weight.compute_class_weight('balanced', classes=class_numbers, y=labels.flatten())
    # compute_class_weight aligns the returned weights with `classes` by POSITION; the old
    # `{i: rebalance[i] for i in class_numbers}` indexed by label value, which is wrong for
    # non-contiguous label sets (e.g. classes [1, 5])
    class_weights = {c: rebalance[idx] for idx, c in enumerate(class_numbers)}
    # record the build configuration explicitly instead of eval() on variable names
    info_dict.update({
        'class_numbers': class_numbers,
        'num_classes': num_classes,
        'base_learning_rate': base_learning_rate,
        'model_name_str': model_name_str,
        'IMG_SIZE': IMG_SIZE,
        'dropout_val': dropout_val,
    })
    info_dict['class_weights'] = str(class_weights)
    return model, class_weights, info_dict
def label_naming_shorthand_dict(name_key=None):
    """Map verbose label-structure descriptions to their shorthand names.

    Returns the whole mapping when `name_key` is None, otherwise the shorthand
    for that single key (KeyError if unknown).
    """
    shorthand = {
        '[0, 1, 2, 3, 4, 5]- (no touch, touch, onset, one after onset, offset, one after offset)': 'on-off_set_and_one_after',
        '[0, 1, 2, 3]- (no touch, touch, onset, offset': 'on-off_set',
        '[0, 1, 2]- (no event, onset, offset)': 'only_on-off_set',
        '[0, 1]- (no touch, touch)': 'regular',
        '[0, 1]- (not offset, offset)': 'only_offset',
        '[0, 1]- (not onset, onset)': 'only_onset'}
    return shorthand if name_key is None else shorthand[name_key]
def info_dict_wrapper(info_dict, local_dict):
    """Merge `local_dict` into `info_dict` in place and return `info_dict`.

    Replaces the manual key-by-key copy loop with the equivalent ``dict.update``.
    """
    info_dict.update(local_dict)
    return info_dict
def make_initial_folder(all_models_directory, unique_h5_train_val_dir):
    """Create ``<all_models_directory>/<unique_h5_train_val_dir>/data/single_frame`` (if missing).

    :return: dict with keys 'all_models_directory', 'unique_h5_train_val_dir' and
             'single_frame_dir' (the same mapping the old ``return locals()`` produced,
             made explicit so the contract survives local renames).
    """
    single_frame_dir = all_models_directory + os.sep + unique_h5_train_val_dir + os.sep + 'data' + os.sep + 'single_frame'
    Path(single_frame_dir).mkdir(parents=True, exist_ok=True)
    return {'all_models_directory': all_models_directory,
            'unique_h5_train_val_dir': unique_h5_train_val_dir,
            'single_frame_dir': single_frame_dir}
def get_automated_model_info(BASE_H5, image_source_h5_directory_ending, test_data_dir, data_string_key = "data"):
    """Collect train/val/test h5 paths and label-key options from the standard folder layout.

    NOTE: this returns ``locals()``, so callers depend on the local variable NAMES
    below as dict keys; renaming any local changes the returned mapping.
    """
    # timestamp in Los Angeles time, used as a unique version string for this run
    tz = pytz.timezone('America/Los_Angeles')
    loc_dt = pytz.utc.localize(datetime.utcnow())
    LA_TIME = loc_dt.astimezone(tz)
    todays_version = LA_TIME.strftime("%Y_%m_%d_%H_%M_%S")
    # delete the intermediates so they do not leak into the returned locals()
    del tz
    del loc_dt
    del LA_TIME
    a = os.sep
    base_data_dir = BASE_H5 + a + data_string_key + a
    base_dir_all_h5s = BASE_H5 + a + data_string_key + a + 'single_frame' + a
    data_dir = base_data_dir + image_source_h5_directory_ending
    print('\nFOR IMAGES, 0 is train set, 1 is val set')
    print(data_dir)
    # assumes utils.get_h5s lists the train h5 first and the val h5 second -- TODO confirm
    image_h5_list = utils.get_h5s(data_dir)
    h5_train = image_h5_list[0]
    h5_val = image_h5_list[1]
    labels_dir = base_data_dir + a + "ALT_LABELS" + a
    print('\nFOR LABELS,0 is train set, 1 is val set')
    label_h5_list = utils.get_h5s(labels_dir)
    print('\nSelect from the following label structures...')
    label_key_name_list = utils.print_h5_keys(label_h5_list[0], return_list=True)
    # test-set labels and images live under test_data_dir with the same sub-layout
    h5_test_labels = utils.get_h5s(test_data_dir + a + "ALT_LABELS" + a, print_h5_list=False)[0]
    h5_test = utils.get_h5s(test_data_dir + a + image_source_h5_directory_ending + a, print_h5_list=False)[0]
    return locals()
def copy_over_new_labels(label_key_name, image_h5_list, label_h5_list):
    """Copy the chosen label key from each label h5 into its paired image h5 under key 'labels'.

    NOTE: returns ``locals()`` -- callers merge the returned names (including the
    loop variables) into info_dict, so the local names here are part of the contract.
    """
    label_key_shorthand = label_naming_shorthand_dict(label_key_name)
    for img_src, lab_src in zip(image_h5_list, label_h5_list):
        utils.copy_h5_key_to_another_h5(lab_src, img_src, label_key_name, 'labels')
    return locals()
# get list of pre trained models to choose from
def get_keras_model_names():
    """Return (and print, with indices) the names of the callables in keras.applications."""
    names_, types_ = utils.get_class_info(applications, return_name_and_type=True)
    # boolean mask selecting the entries whose reported type is 'function'
    is_function = np.asarray(types_) == 'function'
    model_names = np.asarray(names_)[is_function]
    utils.print_list_with_inds(model_names)
    return model_names
def make_model_save_directory(info_dict, make_folder=True):
    """Compose the unique model-save directory from naming fields in `info_dict`.

    Sets 'model_save_dir' and 'model_save_dir_checkpoints' on `info_dict`; optionally
    creates the checkpoints folder. Returns the composed model_save_dir string.
    """
    naming_list = ['model_name_str', 'image_source_h5_directory_ending', 'label_key_shorthand', 'todays_version']
    # each naming component is wrapped in separators, matching the historical layout
    suffix = ''.join(os.sep + info_dict[key] + os.sep for key in naming_list)
    model_save_dir = info_dict['BASE_H5'] + suffix
    info_dict['model_save_dir'] = model_save_dir
    info_dict['model_save_dir_checkpoints'] = model_save_dir + os.sep + 'checkpoints'
    if make_folder:
        Path(info_dict['model_save_dir_checkpoints']).mkdir(parents=True, exist_ok=True)
    return model_save_dir
def basic_callbacks(save_checkpoint_filepath, monitor='val_loss', patience=10,
                    save_best_only=False, save_weights_only=True, save_freq="epoch", period = 1):
    """Build the standard callback list: early stopping plus periodic checkpointing.

    :param save_checkpoint_filepath: directory in which checkpoint files are written.
    :param monitor: metric watched by both early stopping and checkpointing.
    :param patience: epochs without improvement before early stopping triggers.
    :param save_best_only: if True, keep only checkpoints that improve `monitor`.
    :param save_weights_only: if True, save weights (hdf5) rather than the full model.
    :param save_freq: checkpoint frequency unit (leave as "epoch").
    :param period: epochs between checkpoints.
    :return: list of keras callbacks.
    """
    callbacks = []
    callbacks.append(keras.callbacks.EarlyStopping(monitor=monitor, patience=patience))
    # checkpoint file-name pattern; the epoch number keeps files unique
    # (a first pattern that also embedded the loss was dead code -- it was immediately
    # overwritten -- and has been removed)
    add_path_name = "{epoch:04d}_cp.hdf5"
    callbacks.append(keras.callbacks.ModelCheckpoint(
        save_checkpoint_filepath + os.sep + add_path_name,
        monitor=monitor,
        save_best_only=save_best_only,
        save_weights_only=save_weights_only,
        save_freq=save_freq,
        period=period))
    return callbacks
def unzip_and_place_h5s(bd, do_delete_zips=False):
    """Mirror the gdrive tree `bd` to its local path, then unzip every *.zip in place.

    :param bd: source directory path containing 'gdrive/My Drive'.
    :param do_delete_zips: when True, remove each zip archive after extraction.
    """
    # local destination: the same path with the gdrive segment stripped out
    local_dir = ''.join(bd.split('gdrive/My Drive'))
    shutil.copytree(bd, local_dir)
    for zip_path in utils.get_files(local_dir, '*.zip'):
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(os.path.dirname(zip_path))
        if do_delete_zips:
            os.remove(zip_path)
def change_to_local_dir(bd):
bd2= ''.join(bd.split('gdrive/My Drive'))
return bd2
def change_to_gdrive_dir(bd):
    """Return `bd` with every '/content/' segment expanded to '/content/gdrive/My Drive/'."""
    return bd.replace('/content/', '/content/gdrive/My Drive/')
def pack_training_info(save_and_plot_history_var):
    """Collect history/plotting attributes from a save_and_plot_history callback into a JSON-safe dict.

    Attributes are read by name with ``getattr`` (replacing the old ``eval`` on a
    constructed string); numpy arrays are converted to plain lists so the result
    can be json-dumped.
    """
    training_info = dict()
    for attr in ['L', 'all_logs', 'all_colors', 'logs_names', 'markers', 'matching_inds']:
        training_info[attr] = getattr(save_and_plot_history_var, attr)
    for key in list(training_info):
        if isinstance(training_info[key], np.ndarray):
            training_info[key] = training_info[key].tolist()
    return training_info
def save_info_dict(info_dict_tmp):
    """Convert any numpy arrays in `info_dict_tmp` to lists (in place) and dump it as JSON.

    The output file is ``<model_save_dir>info_dict.json``, where 'model_save_dir'
    is read from the dict itself.
    """
    for key in info_dict_tmp.keys():
        value = info_dict_tmp[key]
        if 'numpy.ndarray' in str(type(value)):
            info_dict_tmp[key] = value.tolist()
    out_path = info_dict_tmp['model_save_dir'] + 'info_dict' + '.json'
    with open(out_path, 'w') as f:
        json.dump(info_dict_tmp, f)
def foo_save_and_plot(training_info, save_and_plot_history_1, save_loc):
    """Render accuracy and loss training-history figures and save them, together with the
    packed per-epoch history JSON, under `save_loc`."""
    fig_text = transfer_learning.make_text_for_fig(training_info)
    training_info = transfer_learning.pack_training_info(training_info, save_and_plot_history_1)
    # accuracy figure
    transfer_learning.plot_train_hist(training_info, [1], [.9, 1], fig_text)
    plt.savefig(save_loc + 'mod_test_fig_ACC' + '.png', bbox_inches="tight")
    # loss figure
    transfer_learning.plot_train_hist(training_info, [0], [0, 0.25], fig_text)
    plt.savefig(save_loc + 'mod_test_fig_LOSS' + '.png', bbox_inches="tight")
    # per-epoch training info as JSON
    with open(save_loc + 'model_eval_each_epoch' + '.json', 'w') as f:
        json.dump(training_info, f)
# In[17]:
def foo_run_all():
    """End-to-end training driver (Colab-notebook style).

    Builds the data folders, wires up train/val/test h5 files and labels, constructs
    the model, trains it with checkpointing and plotting callbacks, then saves the
    figures, history JSON and the run's info_dict. All configuration is read from
    module-level globals assigned below (batch_size, model_name_str, epochs, ...).
    """
    #copy and unzip for colab
    global all_models_directory, test_data_dir, info_dict, transfer_learning
    local_dict = make_initial_folder(all_models_directory,
                                     unique_h5_train_val_dir)  # make data single frame directory and get that directory
    info_dict = info_dict_wrapper(info_dict, local_dict)  # wrap output into a dict
    BASE_H5 = info_dict['all_models_directory'] + os.sep + info_dict['unique_h5_train_val_dir']  # directory for all the data for a certain type of images (lag or regular etc)
    local_dict = get_automated_model_info(BASE_H5, image_source_h5_directory_ending, test_data_dir)  # basic data like directories and train and val sets (automated from the directory)
    info_dict = info_dict_wrapper(info_dict, local_dict)  # wrap output into a dict
    # copy the chosen label key into the train/val/test image h5s (after single-frame conversion)
    local_dict = copy_over_new_labels(label_key_name, info_dict['image_h5_list'] + [info_dict['h5_test']],
                                      info_dict['label_h5_list'] + [info_dict['h5_test_labels']])
    info_dict = info_dict_wrapper(info_dict, local_dict)  # wrap output into a dict
    # build batch generators and gather labels for class weighting
    train_gen = image_tools.ImageBatchGenerator(batch_size, [info_dict['h5_train']])
    val_gen = image_tools.ImageBatchGenerator(batch_size, [info_dict['h5_val']])
    test_gen = image_tools.ImageBatchGenerator(batch_size, [info_dict['h5_test']])
    train_labels = image_tools.get_h5_key_and_concatenate([info_dict['h5_train']])
    val_labels = image_tools.get_h5_key_and_concatenate([info_dict['h5_val']])
    labels = np.concatenate((train_labels, val_labels))
    # NOTE(review): build_model is called twice; this first model is discarded below
    # when build_model is called again -- looks redundant, TODO confirm and remove one
    model, class_weights, info_dict = build_model(info_dict, labels, model_name_str,
                                                  base_learning_rate=base_learning_rate,
                                                  dropout_val=dropout_val)
    # info_dict['BASE_H5'] = change_to_gdrive_dir(info_dict['BASE_H5'])
    model_save_dir = make_model_save_directory(info_dict)  # make a unique folder using the standard folder struct, ending in a unique date/time folder
    # ###change directory to gdrive in case it crashes
    # dir_2_change = ['model_save_dir_checkpoints', 'model_save_dir']
    # for k in dir_2_change:
    #     info_dict[k] = change_to_gdrive_dir(info_dict[k])
    model, class_weights, info_dict = build_model(info_dict,
                                                  labels,
                                                  model_name_str,
                                                  base_learning_rate=base_learning_rate,
                                                  dropout_val=dropout_val)
    callbacks = basic_callbacks(info_dict['model_save_dir_checkpoints'], monitor=monitor, patience=patience,
                                save_best_only=save_best_only, save_weights_only=True,
                                save_freq=save_freq, period=period)
    plot_callback = transfer_learning.save_and_plot_history(test_gen, key_name='labels', plot_metric_inds=[0])
    callbacks.append(plot_callback)
    # fit the model
    history = model.fit(train_gen,
                        epochs=epochs,
                        validation_data=val_gen,
                        callbacks=callbacks,
                        class_weight=class_weights)
    # save final checkpoint after the model finishes
    model.save_weights(info_dict['model_save_dir_checkpoints'] + os.sep + 'final_epoch_cp.hdf5')
    training_info = pack_training_info(plot_callback)
    # NOTE(review): the next three lines look like debug leftovers -- `xx` is a hardcoded
    # Colab path, the get_files result is discarded, and the bare attribute access is a no-op
    xx = '/content/colab_data2/model_testing/all_data/all_models/small_h5s/InceptionV3/3lag/on-off_set_and_one_after/'
    utils.get_files(xx, '*_cp.hdf5')
    transfer_learning.foo_save_and_plot
    foo_save_and_plot(training_info, plot_callback, info_dict['model_save_dir'])
    save_info_dict(info_dict)
    ### dont need to copy we changed the directory above
    # x1 = info_dict['model_save_dir']
    # x2 = change_to_gdrive_dir(x1)
    # shutil.copytree(x1, x2)
# In[18]:
## start
# ---- run configuration: these module-level names are read as globals by foo_run_all ----
label_key_name_list = label_naming_shorthand_dict()  # get a list of label key names... they are really long to be specific
utils.print_list_with_inds(label_key_name_list)  # print them, then below use their index to choose them
all_models_directory = bd2+"model_testing/all_data/all_models/"  # DETERMINES location for all models you will run
test_data_dir = bd2+"model_testing/all_data/test_data/10_percent_holy_set/"  # DETERMINES location for test data (folder chosen by "image_source_h5_directory_ending")
unique_h5_train_val_dir = 'regular_80_border'  # DETERMINES the name of the folder where each type of data is stored
image_source_h5_directory_ending = "/3lag/"  # DETERMINES THE IMAGE SOURCE
label_key_name = list(label_key_name_list.keys())[2]  # DETERMINES THE LABEL SOURCE; choose the ind based on the print out
re_copy_and_unzip = True  # presumably toggles re-copying/unzipping data for Colab -- not used below, TODO confirm
model_name_str = 'ResNet50V2'  # DETERMINES model base
base_learning_rate = 10**-6  # DETERMINES rate of change for each epoch step
dropout_val = 0.5  # DETERMINES percentage of dropout for training data
patience = 15  # DETERMINES early stopping
save_freq = "epoch"  # leave this as epoch
period = 2  # DETERMINES how often it saves the checkpoints
epochs = 5000  # DETERMINES how many epochs the model trains for if early stopping is never triggered
batch_size = 200  # DETERMINES number of images per batch
save_best_only = True  # only keep checkpoints that improve the monitored metric
monitor = 'val_loss'  # metric watched by early stopping / checkpointing
info_dict = dict()  # accumulates run metadata; written out by save_info_dict at the end
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
from twilio.rest import Client
def send_text(mess):
    """Send `mess` as an SMS via Twilio.

    NOTE(review): the account SID, auth-token placeholder and phone numbers are
    hardcoded in source -- credentials must be moved to environment variables or a
    secret store before this is shared or committed.
    """
    account_sid = "AC403675d69d88a93207f1cb80e9187c5e"
    auth_token = '<PASSWORD>'  # placeholder -- the real token must come from a secret store
    client = Client(account_sid, auth_token)
    message = client.messages .create(
        body = mess, #Message you send
        from_ = "+19199754333",#Provided phone number
        to = "+15023109622")#Your phone number
    message.sid  # NOTE(review): bare attribute access, result unused -- effectively a no-op
# second round of label/data selection settings
label_ind = 0  # index into the label-structure list -- not referenced below in this view, TODO confirm use
unique_h5_train_val_dir = 'regular_80_border'  # DETERMINES the name of the folder where each type of data is stored
image_source_h5_directory_ending = "/3lag/"  # DETERMINES THE IMAGE SOURCE
# 0 [0, 1, 2, 3, 4, 5]- (no touch, touch, onset, one after onset, offset, one after offset)
# 1 [0, 1, 2, 3]- (no touch, touch, onset, offset
# 2 [0, 1, 2]- (no event, onset, offset)
#
# def send_text(mess):
# account_sid = "AC403675d69d88a93207f1cb80e9187c5e"
# auth_token = '<PASSWORD>'
# client = Client(account_sid, auth_token)
# message = client.messages .create(
# body = mess, #Message | |
<gh_stars>100-1000
from monk.tf_keras_1.finetune.imports import *
from monk.system.imports import *
from monk.tf_keras_1.finetune.level_14_master_main import prototype_master
class prototype(prototype_master):
'''
Main class for Mxnet Backend
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
    """Initialize the Keras (TF) backend prototype and print backend versions.

    Args:
        verbose (int): Verbosity level
                       0 - Print Nothing
                       1 - Print desired details
    """
    super().__init__(verbose=verbose);
    # record the active backend library in the shared system dictionary
    self.system_dict["library"] = "Keras";
    self.custom_print("Keras Version: {}".format(keras.__version__));
    self.custom_print("Tensorflow Version: {}".format(tf.__version__));
    self.custom_print("");
###############################################################################################################################################
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]],
              eval_infer=None, resume_train=None, copy_from=None, pseudo_copy_from=None, summary=None, post_trace=False)
@accepts("self", str, str, eval_infer=bool, resume_train=bool, copy_from=[list, bool], pseudo_copy_from=[list, bool], summary=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Prototype(self, project_name, experiment_name, eval_infer=False, resume_train=False, copy_from=False, pseudo_copy_from=False, summary=False):
    '''
    Create the project and experiment for instantiating and running the experiments

    Args:
        project_name (str): Project Name
        experiment_name (str): Experiment Name
        eval_infer (bool): If set as True, model is loaded in evaluation mode
        resume_train (bool): If set as True, model is loaded from last checkpoint
        copy_from (list, bool): [project, experiment] to copy from, or False
        pseudo_copy_from (list, bool): For creating sub-experiments while in hyper-parametric analysis state
        summary (bool): Dummy variable (unused here)

    Returns:
        None
    '''
    self.set_system_project(project_name);
    self.set_system_experiment(experiment_name, eval_infer=eval_infer, resume_train=resume_train, copy_from=copy_from,
                               pseudo_copy_from=pseudo_copy_from, summary=summary);
    # echo the resolved experiment layout so the user can find artifacts on disk
    self.custom_print("Experiment Details");
    self.custom_print("    Project: {}".format(self.system_dict["project_name"]));
    self.custom_print("    Experiment: {}".format(self.system_dict["experiment_name"]));
    self.custom_print("    Dir: {}".format(self.system_dict["experiment_dir"]));
    self.custom_print("");
################################################################################################################################################
###############################################################################################################################################
@warning_checks(None, dataset_path=None, path_to_csv=None, delimiter=None,
                model_name=None, freeze_base_network=None, num_epochs=["lt", 100], post_trace=False)
@error_checks(None, dataset_path=["folder", "r"], path_to_csv=["file", "r"], delimiter=["in", [",", ";", "-", " "]],
              model_name=None, freeze_base_network=None, num_epochs=["gte", 1], post_trace=False)
@accepts("self", dataset_path=[str, list, bool], path_to_csv=[str, list, bool], delimiter=str,
         model_name=str, freeze_base_network=bool, num_epochs=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Default(self, dataset_path=False, path_to_csv=False, delimiter=",", model_name="resnet18_v1", freeze_base_network=True, num_epochs=10):
    '''
    Use monk in default (quick prototyping) mode

    Args:
        dataset_path (str, list): Path to Dataset folder
                                  1) Single string if validation data does not exist
                                  2) List [train_path, val_path] in case of separate train and val data
        path_to_csv (str, list): Path to csv file pointing towards images
                                  1) Single string if validation data does not exist
                                  2) List [train_path, val_path] in case of separate train and val data
        delimiter (str): Delimiter for csv file
        model_name (str): Base model name
        freeze_base_network (bool): If True base network is freezed
        num_epochs (int): Number of epochs to train the data

    Returns:
        None
    '''
    if(self.system_dict["states"]["eval_infer"]):
        # Bug fix: the original call also passed `import_as_csv=import_as_csv`, a name
        # never defined in this scope, so eval/infer mode always raised NameError.
        self.Dataset_Params(dataset_path=dataset_path, path_to_csv=path_to_csv, delimiter=delimiter);
        self.Dataset();
    else:
        input_size=224;
        self.Dataset_Params(dataset_path=dataset_path, path_to_csv=path_to_csv, delimiter=delimiter,
                            split=0.7, input_size=input_size, batch_size=4, shuffle_data=True, num_processors=psutil.cpu_count());
        # default augmentation / normalization pipeline
        self.apply_random_horizontal_flip(probability=0.8, train=True, val=True);
        self.apply_mean_subtraction(mean=[0.485, 0.456, 0.406], train=True, val=True, test=True);
        self.Dataset();
        self.Model_Params(model_name=model_name, freeze_base_network=freeze_base_network, use_gpu=True, gpu_memory_fraction=0.6, use_pretrained=True);
        self.Model();
        model_name = self.system_dict["model"]["params"]["model_name"];
        # per-architecture default optimizer / scheduler / loss
        if("resnet" in model_name or "vgg" in model_name or "dense" in model_name or "xception" in model_name):
            self.optimizer_sgd(0.0001, momentum=0.9);
            self.lr_plateau_decrease(factor=0.1, patience=max(min(10, num_epochs//3), 1), verbose=True);
            self.loss_crossentropy();
        elif("nas" in model_name):
            self.optimizer_rmsprop(0.0001, weight_decay=0.00004, momentum=0.9);
            self.lr_step_decrease(2, gamma=0.97);
            self.loss_crossentropy();
        elif("mobile" in model_name):
            self.optimizer_sgd(0.0001, weight_decay=0.00004, momentum=0.9);
            self.lr_step_decrease(1, gamma=0.97);
            self.loss_crossentropy();
        elif("inception" in model_name):
            self.optimizer_sgd(0.0001, weight_decay=0.0001, momentum=0.9);
            self.lr_step_decrease(1, gamma=0.9);
            self.loss_crossentropy();
        self.Training_Params(num_epochs=num_epochs, display_progress=True, display_progress_realtime=True,
                             save_intermediate_models=True, intermediate_model_prefix="intermediate_model_", save_training_logs=True);
        self.system_dict["hyper-parameters"]["status"] = True;
        save(self.system_dict);
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Summary(self):
    '''
    Print a summary of the entire project/experiment state (read from the
    experiment's relative file name in system_dict).

    Args:
        None

    Returns:
        None
    '''
    print_summary(self.system_dict["fname_relative"]);
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Models(self):
    '''
    List all base models supported by this backend (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_models();
###############################################################################################################################################
## Will be depricated in v2.0
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Layers(self):
    '''
    List all layers available for appending to the base model.

    Deprecated in v2.0 (see comment above); use List_Layers_Transfer_Learning instead.

    Args:
        None

    Returns:
        None
    '''
    self.print_list_layers_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Layers_Transfer_Learning(self):
    '''
    List all layers available for appending to the base model (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_layers_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Layers_Custom_Model(self):
    '''
    List all layers available for building a custom model (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_layers_custom_model();
###############################################################################################################################################
## Will be depricated in v2.0
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Activations(self):
    '''
    List all activations available for appending to the base model.

    Deprecated in v2.0 (see comment above); use List_Activations_Transfer_Learning instead.

    Args:
        None

    Returns:
        None
    '''
    self.print_list_activations_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Activations_Transfer_Learning(self):
    '''
    List all activations available for appending to the base model (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_activations_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Activations_Custom_Model(self):
    '''
    List all activations available for building a custom model (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_activations_custom_model();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Losses(self):
    '''
    List all available loss functions (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_losses();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Optimizers(self):
    '''
    List all available optimizers (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_optimizers();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Schedulers(self):
    '''
    List all available learning-rate schedulers (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_schedulers();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Transforms(self):
    '''
    List all available data-transformation functions (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_transforms();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Blocks(self):
    '''
    List all blocks available for building a custom model (printed, nothing returned).

    Args:
        None

    Returns:
        None
    '''
    self.print_list_blocks();
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
              state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Learning_Rates(self, analysis_name, lr_list, percent_data, num_epochs=2, state="keep_all"):
    '''
    Hyperparameter Tuner - Analyse learning rate.
    Takes in a list of learning rates, trains each one on a sampled part of
    the dataset, then provides summaries and comparison graphs on every
    sub-experiment created.

    Args:
        analysis_name (str): A suitable name for analysis
        lr_list (list): List of learning rates.
        percent_data (int): Percentage of complete dataset to run experiments on.
        num_epochs (int): Number of epochs for each sub-experiment
        state (str): If set as "keep_all", keeps every file in the sub-experiment
                     If set as "keep_none", keeps only comparison files for each experiment

    Returns:
        dict: Tabular data on training_accuracy, validation_accuracy,
              training_loss, validation_loss and training_time for each experiment.
    '''
    from monk.keras_prototype import prototype
    project = analysis_name;
    self.custom_print("");
    self.custom_print("Running Learning rate analysis");
    self.custom_print("Analysis Name : {}".format(project));
    self.custom_print("");
    # One sub-experiment per candidate learning rate, each cloned from the
    # current experiment and trained on a sampled subset of the data.
    for i in range(len(lr_list)):
        ktf_ = prototype(verbose=0);
        self.custom_print("Running experiment : {}/{}".format(i+1, len(lr_list)));
        experiment = "Learning_Rate_" + str(lr_list[i]);
        self.custom_print("Experiment name : {}".format(experiment))
        ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
        ktf_.Dataset_Percent(percent_data);
        dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
        dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
        dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
        csv_train = ktf_.system_dict["dataset"]["csv_train"];
        csv_val = ktf_.system_dict["dataset"]["csv_val"];
        # Point the cloned experiment at the sampled csv file(s) produced by
        # Dataset_Percent() above.
        if(dataset_type=="train"):
            ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
        elif(dataset_type=="train-val"):
            ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
                                path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
        elif(dataset_type=="csv_train"):
            ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
        elif(dataset_type=="csv_train-val"):
            ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
                                path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
        ktf_.update_learning_rate(lr_list[i])
        ktf_.Reload();
        ktf_.update_num_epochs(num_epochs);
        ktf_.update_display_progress_realtime(False)
        ktf_.update_save_intermediate_models(False);
        # Estimate total training time before actually training.
        ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
        ktf_.set_dataset_dataloader(estimate=True);
        total_time_per_epoch = ktf_.get_training_estimate();
        total_time = total_time_per_epoch*num_epochs;
        if(int(total_time//60) == 0):
            self.custom_print("Estimated time : {} sec".format(total_time));
        else:
            self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
        ktf_.Train();
        self.custom_print("Experiment Complete");
        self.custom_print("\n");
    # Aggregate the results of all sub-experiments into one comparison.
    self.custom_print("Comparing Experiments");
    from monk.compare_prototype import compare
    ctf_ = compare(verbose=0);
    ctf_.Comparison("Comparison_" + analysis_name);
    self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
    # NOTE(review): the four accumulator lists below are never appended to in
    # this function; only tabular_data is actually used.
    training_accuracies = [];
    validation_accuracies = [];
    training_losses = [];
    validation_losses = [];
    tabular_data = [];
    for i in range(len(lr_list)):
        project = analysis_name;
        experiment = "Learning_Rate_" + str(lr_list[i]);
        ctf_.Add_Experiment(project, experiment)
        tmp = [];
        tmp.append(experiment);
        # The last entry of each history file is the value after the final epoch.
        training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
        tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
        validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
        tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
        training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
        tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
        validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
        tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
        tabular_data.append(tmp)
    ctf_.Generate_Statistics();
    self.custom_print("Generated statistics post all epochs");
    self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
    self.custom_print("");
    return_dict = {};
    for i in range(len(tabular_data)):
        return_dict[tabular_data[i][0]] = {};
        return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
        return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
        return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
        return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
        # NOTE(review): unlike the history-file paths above, no "/" is inserted
        # after master_systems_dir_relative here (or in the rmtree below) —
        # correct only if that value ends with a slash; confirm.
        fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
        system_dict = read_json(fname);
        return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
    if(state=="keep_none"):
        shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
    return return_dict
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Input_Sizes(self, analysis_name, inp_size_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse input sizes
Takes in a list of input sizes and trains on a part of dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
inp_size_list | |
def __getitem__(self, i):
    """Delegate indexing straight to the underlying stop list."""
    return self.stops[i]
def __iter__(self):
    """Iterate over the route's stops in list order."""
    return iter(self.stops)
def __repr__(self):
    """Return a debug summary of the route.

    Fixed: the template used to lack its closing parenthesis, producing an
    unbalanced ``Route(...`` string.
    """
    return (
        'Route(id={}, mode={}, ref={}, name={}, network={}, interval={}, '
        'circular={}, num_stops={}, line_length={} m, from={}, to={})'
    ).format(
        self.id,
        self.mode,
        self.ref,
        self.name,
        self.network,
        self.interval,
        self.is_circular,
        len(self.stops),
        self.stops[-1].distance,
        self.stops[0],
        self.stops[-1],
    )
class RouteMaster:
    """Aggregates all variants of one public-transport line.

    Attributes are seeded from the route_master relation when one exists,
    otherwise they are filled in (and reconciled) from member routes via
    :meth:`add`.
    """

    def __init__(self, master=None):
        self.routes = []
        self.best = None  # the variant with the most stops (see add())
        self.id = el_id(master)
        self.has_master = master is not None
        self.interval_from_master = False
        if master:
            self.ref = master['tags'].get(
                'ref', master['tags'].get('name', None)
            )
            try:
                self.colour = normalize_colour(
                    master['tags'].get('colour', None)
                )
            except ValueError:
                self.colour = None
            try:
                self.infill = normalize_colour(
                    master['tags'].get('colour:infill', None)
                )
            except ValueError:
                # Fixed copy-paste bug: this branch used to clobber
                # self.colour and left self.infill unset.
                self.infill = None
            self.network = Route.get_network(master)
            self.mode = master['tags'].get(
                'route_master', None
            )  # This tag is required, but okay
            self.name = master['tags'].get('name', None)
            self.interval = Route.get_interval(master['tags'])
            self.interval_from_master = self.interval is not None
        else:
            self.ref = None
            self.colour = None
            self.infill = None
            self.network = None
            self.mode = None
            self.name = None
            self.interval = None

    def add(self, route, city):
        """Attach a route variant, reporting mismatches against master data."""
        if not self.network:
            self.network = route.network
        elif route.network and route.network != self.network:
            city.error(
                'Route has different network ("{}") from master "{}"'.format(
                    route.network, self.network
                ),
                route.element,
            )
        if not self.colour:
            self.colour = route.colour
        elif route.colour and route.colour != self.colour:
            city.notice(
                'Route "{}" has different colour from master "{}"'.format(
                    route.colour, self.colour
                ),
                route.element,
            )
        if not self.infill:
            self.infill = route.infill
        elif route.infill and route.infill != self.infill:
            city.notice(
                'Route "{}" has different infill colour from master "{}"'.format(
                    route.infill, self.infill
                ),
                route.element,
            )
        if not self.ref:
            self.ref = route.ref
        elif route.ref != self.ref:
            city.notice(
                'Route "{}" has different ref from master "{}"'.format(
                    route.ref, self.ref
                ),
                route.element,
            )
        if not self.name:
            self.name = route.name
        if not self.mode:
            self.mode = route.mode
        elif route.mode != self.mode:
            city.error(
                'Incompatible PT mode: master has {} and route has {}'.format(
                    self.mode, route.mode
                ),
                route.element,
            )
            # A variant of a different mode is rejected outright.
            return
        # The smallest route interval wins unless the master pinned one.
        if not self.interval_from_master and route.interval:
            if not self.interval:
                self.interval = route.interval
            else:
                self.interval = min(self.interval, route.interval)
        # Without a master relation, use the smallest member route id as a
        # stable id for the whole line.
        if not self.has_master and (not self.id or self.id > route.id):
            self.id = route.id
        self.routes.append(route)
        if not self.best or len(route.stops) > len(self.best.stops):
            self.best = route

    def stop_areas(self):
        """Yield every stop area across all route variants, de-duplicated."""
        seen_ids = set()
        for route in self.routes:
            for stop in route:
                st = stop.stoparea
                if st.id not in seen_ids:
                    seen_ids.add(st.id)
                    yield st

    def __len__(self):
        return len(self.routes)

    def __getitem__(self, i):
        return self.routes[i]

    def __iter__(self):
        return iter(self.routes)

    def __repr__(self):
        # Fixed: the closing parenthesis was missing from the template.
        return (
            'RouteMaster(id={}, mode={}, ref={}, name={}, network={}, '
            'num_variants={})'.format(
                self.id,
                self.mode,
                self.ref,
                self.name,
                self.network,
                len(self.routes),
            )
        )
class City:
def __init__(self, row, overground=False):
    """Initialize city metadata and accumulators from one spreadsheet row.

    Row layout as read below: 0=id, 1=name, 2=country, 3=continent,
    4-7=expected counts (meaning depends on *overground*), 8=bbox,
    9=optional "modes:networks" filter string.
    """
    self.errors = []
    self.warnings = []
    self.notices = []
    self.name = row[1]
    self.country = row[2]
    self.continent = row[3]
    if not row[0]:
        self.error('City {} does not have an id'.format(self.name))
    self.id = int(row[0] or '0')
    self.overground = overground
    if not overground:
        # Rapid-transit mode: expected station/line/interchange totals.
        self.num_stations = int(row[4])
        self.num_lines = int(row[5] or '0')
        self.num_light_lines = int(row[6] or '0')
        self.num_interchanges = int(row[7] or '0')
    else:
        # Overground mode: expected per-mode line totals.
        self.num_tram_lines = int(row[4] or '0')
        self.num_trolleybus_lines = int(row[5] or '0')
        self.num_bus_lines = int(row[6] or '0')
        self.num_other_lines = int(row[7] or '0')
    # Acquiring list of networks and modes
    networks = None if len(row) <= 9 else row[9].split(':')
    if not networks or len(networks[-1]) == 0:
        self.networks = []
    else:
        self.networks = set(
            filter(None, [x.strip() for x in networks[-1].split(';')])
        )
    if not networks or len(networks) < 2 or len(networks[0]) == 0:
        if self.overground:
            self.modes = DEFAULT_MODES_OVERGROUND
        else:
            self.modes = DEFAULT_MODES_RAPID
    else:
        self.modes = set([x.strip() for x in networks[0].split(',')])
    # Reversing bbox so it is (xmin, ymin, xmax, ymax)
    bbox = row[8].split(',')
    if len(bbox) == 4:
        self.bbox = [float(bbox[i]) for i in (1, 0, 3, 2)]
    else:
        self.bbox = None
    self.elements = {}  # Dict el_id → el
    self.stations = defaultdict(list)  # Dict el_id → list of StopAreas
    self.routes = {}  # Dict route_master_ref → RouteMaster
    self.masters = {}  # Dict el_id of route → route_master
    self.stop_areas = defaultdict(
        list
    )  # El_id → list of el_id of stop_area
    self.transfers = []  # List of lists of stop areas
    self.station_ids = set()  # Set of stations' uid
    self.stops_and_platforms = set()  # Set of stops and platforms el_id
    self.recovery_data = None
@staticmethod
def log_message(message, el):
if el:
tags = el.get('tags', {})
message += ' ({} {}, "{}")'.format(
el['type'],
el.get('id', el.get('ref')),
tags.get('name', tags.get('ref', '')),
)
return message
def notice(self, message, el=None):
"""This type of message may point to a potential problem."""
msg = City.log_message(message, el)
self.notices.append(msg)
def warn(self, message, el=None):
"""A warning is definitely a problem but is doesn't prevent
from building a routing file and doesn't invalidate the city.
"""
msg = City.log_message(message, el)
self.warnings.append(msg)
def error(self, message, el=None):
"""Error if a critical problem that invalidates the city"""
msg = City.log_message(message, el)
self.errors.append(msg)
def contains(self, el):
    """Return True if the element's center lies inside the city bbox."""
    center = el_center(el)
    if center:
        # NOTE(review): bbox is (xmin, ymin, xmax, ymax) after the reversal
        # in __init__, and center[1] is checked against the x-range — so
        # el_center presumably returns (lat, lon); confirm against its
        # contract. Also assumes self.bbox is not None here.
        return (
            self.bbox[0] <= center[1] <= self.bbox[2]
            and self.bbox[1] <= center[0] <= self.bbox[3]
        )
    return False
def add(self, el):
    """Register an OSM element, indexing route_master and stop_area relations."""
    # Relations without members are not usable here.
    if el['type'] == 'relation' and 'members' not in el:
        return
    self.elements[el_id(el)] = el
    if el['type'] == 'relation' and 'tags' in el:
        if el['tags'].get('type') == 'route_master':
            # Remember which master each member route belongs to; a route may
            # belong to only one master.
            for m in el['members']:
                if m['type'] == 'relation':
                    if el_id(m) in self.masters:
                        self.error('Route in two route_masters', m)
                    self.masters[el_id(m)] = el
        elif el['tags'].get('public_transport') == 'stop_area':
            # Index stop_area membership; warn only once per area about
            # duplicated members.
            warned_about_duplicates = False
            for m in el['members']:
                stop_areas = self.stop_areas[el_id(m)]
                if el in stop_areas:
                    if not warned_about_duplicates:
                        self.warn('Duplicate element in a stop area', el)
                        warned_about_duplicates = True
                else:
                    stop_areas.append(el)
def make_transfer(self, sag):
    """Build a transfer (interchange) set from a stop_area_group relation.

    Only stop_area members that map to known stations participate; the
    resulting set is kept only if it joins at least two stop areas.
    """
    transfer = set()
    for m in sag['members']:
        k = el_id(m)
        el = self.elements.get(k)
        if not el:
            # A sag member may validly not belong to the city while
            # the sag does - near the city bbox boundary
            continue
        if 'tags' not in el:
            self.warn(
                'An untagged object {} in a stop_area_group'.format(k), sag
            )
            continue
        if (
            el['type'] != 'relation'
            or el['tags'].get('type') != 'public_transport'
            or el['tags'].get('public_transport') != 'stop_area'
        ):
            continue
        if k in self.stations:
            stoparea = self.stations[k][0]
            transfer.add(stoparea)
            if stoparea.transfer:
                # TODO: properly process such cases.
                # Counterexample 1: Paris,
                #   Châtelet subway station <->
                #   "Châtelet - Les Halles" railway station <->
                #   Les Halles subway station
                # Counterexample 2: Saint-Petersburg, transfers
                #   Витебский вокзал <-> Пушкинская <-> Звенигородская
                self.warn(
                    'Stop area {} belongs to multiple interchanges'.format(
                        k
                    )
                )
            stoparea.transfer = el_id(sag)
    if len(transfer) > 1:
        self.transfers.append(transfer)
def extract_routes(self):
# Extract stations
processed_stop_areas = set()
for el in self.elements.values():
if Station.is_station(el, self.modes):
# See PR https://github.com/mapsme/subways/pull/98
if (
el['type'] == 'relation'
and el['tags'].get('type') != 'multipolygon'
):
self.warn(
"A railway station cannot be a relation of type '{}'".format(
el['tags'].get('type')
),
el,
)
continue
st = Station(el, self)
self.station_ids.add(st.id)
if st.id in self.stop_areas:
stations = []
for sa in self.stop_areas[st.id]:
stations.append(StopArea(st, self, sa))
else:
stations = [StopArea(st, self)]
for station in stations:
if station.id not in processed_stop_areas:
processed_stop_areas.add(station.id)
for st_el in station.get_elements():
self.stations[st_el].append(station)
# Check that stops and platforms belong to single stop_area
for sp in station.stops | station.platforms:
if sp in self.stops_and_platforms:
self.notice(
'A stop or a platform {} belongs to multiple '
'stop areas, might be correct'.format(sp)
)
else:
self.stops_and_platforms.add(sp)
# Extract routes
for el in self.elements.values():
if Route.is_route(el, self.modes):
if el['tags'].get('access') in ('no', 'private'):
continue
route_id = el_id(el)
master = self.masters.get(route_id, None)
if self.networks:
network = Route.get_network(el)
if master:
master_network = Route.get_network(master)
else:
master_network = None
if (
network not in self.networks
and master_network not in self.networks
):
continue
route = Route(el, self, master)
if not route.stops:
self.warn('Route has no stops', el)
continue
elif len(route.stops) == 1:
self.warn('Route has only one stop', el)
continue
k = el_id(master) if master else route.ref
if k not in self.routes:
self.routes[k] = RouteMaster(master)
self.routes[k].add(route, self)
# Sometimes adding a route to a newly initialized RouteMaster can fail
if len(self.routes[k]) == 0:
del self.routes[k]
# And while we're iterating over relations, find interchanges
if (
el['type'] == 'relation'
and el.get('tags', {}).get('public_transport', None)
== 'stop_area_group'
):
self.make_transfer(el)
# Filter transfers, leaving only stations that belong to routes
used_stop_areas = set()
for rmaster in self.routes.values():
for route in rmaster:
used_stop_areas.update([s.stoparea for s in route.stops])
new_transfers = []
for transfer in self.transfers:
new_tr = [s for s in transfer | |
needed so that the first two
// significant digits are visible.
function stringify(number) {
"use strict";
if (number === 0) {
return number;
}
if (number < 0) {
return "-" + stringify(-number);
}
var precision = 1;
var result = 0;
while (result === 0) {
precision *= 10;
result = Math.round(number * precision) / precision;
}
precision *= 10;
return Math.round(number * precision) / precision;
}
// Update the visibility and width of a specific cell.
function update_cell(visible_columns_mask,
scale_factor, visible_size, cell_id) {
"use strict";
var cell_data = cells_data[cell_id];
var cell = document.getElementById(cell_id);
var cell_offset_is_done = false;
var cell_offset = 0;
var cell_size = 0;
cell_data.columns_mask.forEach(function (is_column_used, column_index) {
if (visible_columns_mask[column_index] > 0) {
if (is_column_used > 0) {
cell_offset_is_done = true;
cell_size += column_sizes[column_index];
} else if (!cell_offset_is_done) {
cell_offset += column_sizes[column_index];
}
}
});
if (!cell_offset_is_done) {
cell.style.display = "none";
return;
}
cell.style.display = null;
var left = Math.round(cell_offset * scale_factor);
cell.style.left = left + "px";
var width = Math.round((cell_offset + cell_size) * scale_factor) - left;
cell.style.width = width + "px";
var computed = cell.querySelector(".computed");
if (!computed) {
return;
}
var computed_text = stringify(cell_size);
if (cell_size === total_size) {
computed.innerText = computed_text;
return;
}
if (visible_size !== total_size && cell_size !== visible_size) {
var percentage_of_visible = 100 * cell_size / visible_size;
computed_text += "<br/>" + stringify(percentage_of_visible) +
"% out of: " + stringify(visible_size) + " visible";
}
var percentage_of_total = 100 * cell_size / total_size;
computed_text += "<br/>" + stringify(percentage_of_total) +
"% out of: " + stringify(total_size) + " total";
computed.innerHTML = computed_text;
}
// Update all the cells visibility and width.
//
// Must be done every time the selected cell and/or the display width change.
function update_cells() {
"use strict";
var visible_columns_mask = compute_visible_columns_mask();
var visible_size = compute_visible_size(visible_columns_mask);
var graph_width = document.getElementById("width").clientWidth;
var graph = document.getElementById("graph");
graph.style.width = graph_width + "px";
var scale_factor = (graph_width - 2) / visible_size;
Object.keys(cells_data).forEach(function (cell_id) {
update_cell(visible_columns_mask, scale_factor, visible_size, cell_id);
});
}
// Cell hover highlights all cells in a group.
// The cell itself is highlighted using the :hover CSS selector.
// The other cells in the group are highlighted using the group_hover class.
// Highlight all group cells on entry.
function on_over(event) {
"use strict";
var cell = event.currentTarget;
var group_id = cells_data[cell.id].group_id;
if (group_id) {
groups_data[group_id].cell_ids.forEach(function (group_cell_id) {
var group_cell = document.getElementById(group_cell_id);
group_cell.classList.add("group_hover");
});
} else {
cell.classList.add("group_hover");
}
}
// Unhighlight all group cells on exit.
function on_out(event) {
"use strict";
var cell = event.currentTarget;
var group_id = cells_data[cell.id].group_id;
if (group_id) {
groups_data[group_id].cell_ids.forEach(function (group_cell_id) {
var group_cell = document.getElementById(group_cell_id);
group_cell.classList.remove("group_hover");
});
} else {
cell.classList.remove("group_hover");
}
}
// Select a cell for filtering the visible graph content.
//
// A simple click just shows the selected cell columns,
// a control-click adds/removes selected cells,
// an alt-click toggles tooltips.
//
// When multiple cells are selected, the lowest-level one restricts the set of
// columns, and each additional higher-level cell further restricts the columns
// to these covered by the group the cell belongs to.
function on_click(event) {
"use strict";
var cell = event.currentTarget;
if (event.altKey) {
document.getElementById("graph").classList.add("tooltipped");
return;
}
if (!event.ctrlKey) {
selected_cell_ids.forEach(function (cell_id) {
document.getElementById(cell_id).classList.remove("selected");
});
selected_cell_ids = [cell.id];
cell.classList.add("selected");
update_cells();
return;
}
var new_selected_cell_ids = [];
selected_cell_ids.forEach(function (cell_id) {
if (cell_id !== cell.id) {
new_selected_cell_ids.push(cell_id);
}
});
if (new_selected_cell_ids.length === selected_cell_ids.length) {
selected_cell_ids.push(cell.id);
cell.classList.add("selected");
update_cells();
return;
}
cell.classList.remove("selected");
selected_cell_ids = new_selected_cell_ids;
if (new_selected_cell_ids.length === 0) {
selected_cell_ids = [root_id];
document.getElementById(root_id).classList.add("selected");
}
update_cells();
}
// Disable tooltips.
function disable_tooltip(event) {
"use strict";
if (event.altKey) {
document.getElementById("graph").classList.remove("tooltipped");
event.stopPropagation();
}
}
// Attach handlers to table cells.
function register_handlers() {
"use strict";
Object.keys(cells_data).forEach(function (cell_id) {
var cell = document.getElementById(cell_id);
cell.onclick = on_click;
cell.onmouseover = on_over;
cell.onmouseout = on_out;
var tooltip = cell.querySelector(".tooltip");
if (tooltip) {
tooltip.onclick = disable_tooltip;
}
});
}
function compute_groups_columns_masks() {
"use strict";
Object.keys(groups_data).forEach(function (group_id) {
var group_data = groups_data[group_id];
group_data.cell_ids.forEach(function (cell_id) {
var cell_data = cells_data[cell_id];
if (!group_data.columns_mask) {
group_data.columns_mask = cell_data.columns_mask.slice();
} else {
var columns_mask = cell_data.columns_mask;
columns_mask.forEach(function (is_column_used, column_index) {
if (is_column_used > 0) {
group_data.columns_mask[column_index] = 1;
}
});
}
});
});
}
function on_load() {
"use strict";
register_handlers();
total_size = compute_visible_size(cells_data[root_id].columns_mask);
compute_groups_columns_masks();
on_click({
"currentTarget": document.getElementById(root_id),
"ctrlKey": false
});
}
// On resize, update all the cell widths.
window.onresize = update_cells;
// Properly initialize everything on load.
window.onload = on_load;
</script>
</head>
<body>
"""[1:]
# Static HTML tail written after the graph table; the trailing [1:] strips
# the newline that follows the opening triple quote.
AFTER_HTML = """
<div id="width"></div>
</body>
</html>
"""[1:]
def _print_output_data(args: Namespace, groups: Dict[str, List[int]],
                       column_sizes: List[float], rows: List[List[Node]]) -> None:
    """Write the HTML document to ``args.output``; None or '-' means stdout."""
    if args.output in (None, '-'):
        _print_output_file(sys.stdout, args, groups, column_sizes, rows)
        return
    with open(args.output, 'w') as out:
        _print_output_file(out, args, groups, column_sizes, rows)
def _print_output_file(file: TextIO, args: Namespace, groups: Dict[str, List[int]],
                       column_sizes: List[float], rows: List[List[Node]]) -> None:
    """Write the complete HTML document (head, CSS, JS data, table) to *file*."""
    file.write(BEFORE_TITLE)
    title = args.title
    if title is None:
        if args.inverted:
            title = "Icicle Graph"
        else:
            title = "Flame Graph"
    _print_title(file, title)
    file.write(BEFORE_CSS)
    # The default appearance can be suppressed and/or extended by user CSS.
    if not args.nodefaultcss:
        file.write(DEFAULT_APPEARANCE_CSS)
    for css_path in args.addcss or []:
        try:
            with open(css_path, 'r') as css_file:
                file.write(css_file.read())
        except FileNotFoundError:
            sys.stderr.write('flameview.py: No such file or directory: %s\n' % css_path)
            sys.exit(1)
    file.write(BEFORE_JAVASCRIPT)
    _print_groups_data(file, groups)
    _print_cells_data(file, rows, len(column_sizes))
    _print_column_sizes(file, column_sizes)
    file.write(BEFORE_HTML)
    _print_h1(file, title)
    # Flame graphs are drawn bottom-up, so rows are reversed unless inverted.
    if args.inverted:
        _print_table(file, args.sizename, args.colors, rows)
    else:
        _print_table(file, args.sizename, args.colors, list(reversed(rows)))
    file.write(AFTER_HTML)
def _print_title(file: TextIO, title: str) -> None:
    """Emit the HTML <title> element for the document head."""
    file.write('<title>{}</title>'.format(title))
def _print_groups_data(file: TextIO, groups: Dict[str, List[int]]) -> None:
    """Emit the JS ``groups_data`` object mapping group name -> member cell ids."""
    file.write(dedent("""
// Data for each cells group:
// cell_ids: The ids of the group cells.
// On load, the following is computed for each group:
// columns_mask: A 0/1 mask of all the columns used by the group cells.
var groups_data = {
"""))
    # Cell ids are prefixed with "N" to match the DOM element ids.
    group_lines = [' "%s": {"cell_ids": ["%s"]}'
                   % (group_name, '", "'.join(['N' + str(id) for id in sorted(cell_ids)]))
                   for group_name, cell_ids in sorted(groups.items())]
    file.write(',\n'.join(group_lines))
    file.write('\n};\n\n')
def _print_cells_data(file: TextIO, rows: List[List[Node]], columns_count: int) -> None:
    """Emit the JS ``cells_data`` object, one entry per table cell."""
    # [1:-1] trims the newlines surrounding the dedented snippet.
    file.write(dedent("""
// Data for each cell:
// level: The stack nesting level.
// columns_mask: A 0/1 mask of all the columns used by the cell.
// group_id: The group the cell belongs to, if any.
var cells_data = {
""")[1:-1])
    # Comma-separate entries without leaving a trailing comma.
    is_first = True
    for level, row in enumerate(rows):
        for node in row:
            if not is_first:
                file.write(',')
            file.write('\n ')
            _print_cell_data(file, node, columns_count, level)
            is_first = False
    file.write('\n};\n')
def _print_cell_data(file: TextIO, node: Node, columns_count: int, level: int) -> None:
    """Emit one cell's JS entry: level, columns mask and optional group id."""
    file.write('"N%s": {\n' % node.index)
    file.write(' "level": %s' % level)
    file.write(',\n "columns_mask": [%s]'
               % _columns_mask(node.column, node.columns_span, columns_count))
    if node.group:
        file.write(',\n "group_id": "%s"' % node.group)
    file.write('\n }')
def _columns_mask(column: int, columns_span: int, columns_count: int) -> str:
    """Render a JS array literal of 0/1 flags marking the used column range.

    Columns [column, column + columns_span) are marked with "1"; the rest
    of the columns_count entries are "0".
    """
    bits = ["0"] * columns_count
    for index in range(column, column + columns_span):
        bits[index] = "1"
    return ', '.join(bits)
def _print_column_sizes(file, column_sizes: List[float]) -> None:
    """Emit the JS ``column_sizes`` array of leaf/self cell sizes."""
    file.write(dedent("""
// The size of each leaf/self cell (that is, a column).
var column_sizes = [%s];
""") % ', '.join([str(size) for size in column_sizes]))
    file.write('\n')
def _print_h1(file: TextIO, title: str) -> None:
    """Emit the page heading element carrying the graph title."""
    file.write('<h1 id="title">{}</h1>\n'.format(title))
def _print_table(file: TextIO, sizename: str, palette: str, rows: List[List[Node]]) -> None:
    """Emit the #graph container with one row per stack level."""
    file.write('<div id="graph" class="tooltipped">\n')
    for level_row in rows:
        _print_row(file, sizename, palette, level_row)
    file.write('</div>\n')
def _print_row(file: TextIO, sizename: str, palette: str, row: List[Node]) -> None:
    """Emit one row div holding every cell of a single stack level."""
    file.write('<div class="row">\n')
    for node in row:
        _print_node(file, sizename, palette, node)
    # NOTE(review): the "height" div presumably keeps the row at full height
    # even when all of its cells are hidden — confirm against the CSS.
    file.write('<div class="height"> </div>\n')
    file.write('</div>\n')
def _print_node(file: TextIO, sizename: str, palette: str, node: Node) -> None:
    """Emit one cell div with its background color, tooltip and label."""
    file.write('<div id="N%s" class="%s"' % (node.index, node.klass))
    file.write(' style="background-color: %s">\n' % _node_color(node, palette))
    _print_tooltip(file, sizename, node)
    _print_label(file, node)
    file.write('</div>\n')
def _node_color(node: Node, palette: str) -> str:
    """Pick a CSS rgb() background for *node*; single-character labels are gray."""
    if len(node.label) == 1:
        return 'rgb(%d, %d, %d)' % (160.0, 160.0, 160.0)
    pick = {
        'hot': _hot_color,
        'mem': _mem_color,
        'io': _io_color,
        'red': _red_color,
        'green': _green_color,
        'blue': _blue_color,
        'aqua': _aqua_color,
        'yellow': _yellow_color,
        'purple': _purple_color,
        'orange': _orange_color,
    }[palette]
    return 'rgb(%d, %d, %d)' % pick()
# Palettes were copied from flamegraph.pl:
def _hot_color() -> Tuple[float, float, float]:
    """Random warm tone: strong red, variable green, faint blue."""
    # Evaluation order (red, green, blue) preserves the random() sequence.
    return (205 + 50 * random(), 230 * random(), 55 * random())
def _mem_color() -> Tuple[float, float, float]:
    """Random green-ish tone: no red, strong green, variable blue."""
    # Evaluation order (green then blue) preserves the random() sequence.
    return (0.0, 190 + 50 * random(), 210 * random())
def _io_color() -> Tuple[float, float, float]:
    """Random blue-ish tone: equal red/green shade under a strong blue."""
    # One random() draw shared by red and green, then one for blue — same
    # consumption order as the original.
    shade = 80 + 60 * random()
    return (shade, shade, 190 + 55 * random())
def _red_color() -> Tuple[float, float, float]:
fraction = random()
red = 200 + 55 * fraction
| |
FieldPanel('model_fotogrametrie')
]
# Tabbed Wagtail edit UI: one tab per thematic group of field panels.
edit_handler = TabbedInterface(
    [
        ObjectList(localizare_panels, heading='Localizare/peisaj'),
        ObjectList(ansamblu_panels, heading='Ansamblu construit'),
        ObjectList(arhitectura_panels, heading='Arhitectura bisericii'),
        ObjectList(structura_panels, heading='Structura'),
        ObjectList(finisaje_panels, heading='Finisaje'),
        ObjectList(interventii_panels,
                   heading='Intervenții arhitecturale vizibile în timp'),
        ObjectList(modele_3d_panels, heading='Modele 3d')
    ]
)
class Meta:  # noqa
    """Human-readable names shown in the admin."""
    verbose_name = "Capitol Descriere"
    verbose_name_plural = "Capitole Descriere"
@classmethod
def get_image_collection(cls):
    """Return the id of the image collection to filter by.

    Currently a fixed collection id; any per-class logic for choosing the
    collection goes here.
    """
    # Fixed: removed a leftover debug print(cls).
    return 2
def save(self, *args, **kwargs):
    """Sync derived flags and cached values from related data before saving.

    - ``are_scanare_laser`` mirrors whether a point-cloud model is set.
    - ``are_model_fotogrametric`` mirrors whether a photogrammetry model is set.
    - ``ansamblu_construit`` caches the names of the related built-ensemble
      elements.
    - ``numar_clopote`` caches the number of related bells.
    """
    self.are_scanare_laser = bool(self.model_nori_de_puncte)
    self.are_model_fotogrametric = bool(self.model_fotogrametrie)
    self.ansamblu_construit = [x.element.nume for x in self.elemente_ansamblu_construit.all()]
    # count() issues a COUNT query instead of fetching every related row.
    self.numar_clopote = self.clopote.count()
    return super().save(*args, **kwargs)
class Persoana(models.Model):
    """Abstract base for a named person with free-text notes and a source."""
    nume = models.CharField(max_length=250)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    sursa = RichTextField(features=[], null=True,
                          blank=True, verbose_name='Sursa')
    # Admin form layout shared by (and extended in) concrete subclasses.
    panels = [
        FieldPanel('nume'),
        FieldPanel('observatii'),
        FieldPanel('sursa'),
    ]

    class Meta:
        abstract = True

    def __str__(self):
        return self.nume
class PozeCtitori(Orderable):
    """Orderable photo (with an optional note) attached to a Ctitori entry."""
    page = ParentalKey('Ctitori',
                       on_delete=models.CASCADE, related_name='poze')
    poza = models.ForeignKey('wagtailimages.Image', null=True,
                             blank=True, on_delete=models.SET_NULL, related_name='+')
    # NOTE(review): presumably cached rendition data for 'poza', filled in
    # elsewhere — confirm.
    rendition = models.JSONField(null=True, blank=True)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    panels = [
        ImageChooserPanel('poza'),
        FieldPanel('observatii'),
    ]
class Ctitori(ClusterableModel, Orderable, Persoana):
    """Person entry ("ctitor") attached to an IstoricPage, with inline photos."""
    page = ParentalKey(
        'IstoricPage', on_delete=models.PROTECT, related_name='ctitori')
    panels = Persoana.panels + [
        InlinePanel('poze', label='Poză')
    ]
class PozeMesteri(Orderable):
    """Orderable photo (with an optional note) attached to a Mesteri entry."""
    page = ParentalKey('Mesteri',
                       on_delete=models.CASCADE, related_name='poze')
    poza = models.ForeignKey('wagtailimages.Image', null=True,
                             blank=True, on_delete=models.SET_NULL, related_name='+')
    # NOTE(review): presumably cached rendition data for 'poza' — confirm.
    rendition = models.JSONField(null=True, blank=True)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    panels = [
        ImageChooserPanel('poza'),
        FieldPanel('observatii'),
    ]
class Mesteri(ClusterableModel, Orderable, Persoana):
    """Person entry ("mester") attached to an IstoricPage, with inline photos."""
    page = ParentalKey(
        'IstoricPage', on_delete=models.PROTECT, related_name='mesteri')
    panels = Persoana.panels + [
        InlinePanel('poze', label='Poză')
    ]
class PozeZugravi(Orderable):
    """Orderable photo (with an optional note) attached to a Zugravi entry."""
    page = ParentalKey('Zugravi',
                       on_delete=models.CASCADE, related_name='poze')
    poza = models.ForeignKey('wagtailimages.Image', null=True,
                             blank=True, on_delete=models.SET_NULL, related_name='+')
    # NOTE(review): presumably cached rendition data for 'poza' — confirm.
    rendition = models.JSONField(null=True, blank=True)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    panels = [
        ImageChooserPanel('poza'),
        FieldPanel('observatii'),
    ]
class Zugravi(ClusterableModel, Orderable, Persoana):
    """Person entry ("zugrav") attached to an IstoricPage, with inline photos."""
    page = ParentalKey(
        'IstoricPage', on_delete=models.PROTECT, related_name='zugravi')
    panels = Persoana.panels + [
        InlinePanel('poze', label='Poză')
    ]
class PozePersonalitati(Orderable):
    """Orderable photo inline attached to a Personalitati (personalities) entry."""
    page = ParentalKey('Personalitati',
                       on_delete=models.CASCADE, related_name='poze')
    # Image may be cleared without deleting the row (SET_NULL).
    poza = models.ForeignKey('wagtailimages.Image', null=True,
                             blank=True, on_delete=models.SET_NULL, related_name='+')
    # NOTE(review): presumably cached rendition data for `poza` — confirm writer.
    rendition = models.JSONField(null=True, blank=True)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    panels = [
        ImageChooserPanel('poza'),
        FieldPanel('observatii'),
    ]
class Personalitati(ClusterableModel, Orderable, Persoana):
    """Notable-person entry on an IstoricPage.

    PROTECT: the page cannot be deleted while personality entries exist.
    """
    page = ParentalKey('IstoricPage', on_delete=models.PROTECT,
                       related_name='personalitati')
    panels = Persoana.panels + [
        InlinePanel('poze', label='Poză')
    ]
class Eveniment(models.Model):
    """Abstract named event with optional rich-text notes."""
    nume = models.CharField(max_length=250)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    panels = [
        FieldPanel('nume'),
        FieldPanel('observatii'),
    ]

    class Meta:
        abstract = True

    def __str__(self):
        # nume is a required CharField, so this is always a str.
        return self.nume
class Evenimente(Orderable, Eveniment):
    """Concrete, orderable Eveniment rows attached to an IstoricPage."""
    page = ParentalKey('IstoricPage', on_delete=models.CASCADE,
                       related_name='evenimente')
class MutareBiserica(models.Model):
    """Abstract record of a church relocation: destination locality,
    address/coordinates, notes and source."""
    localitate = models.ForeignKey('nomenclatoare.Localitate', null=True,
                                   blank=True, on_delete=models.SET_NULL, related_name='p_biserici_mutari')
    adresa = models.CharField(max_length=250, null=True, blank=True)
    latitudine = models.FloatField(null=True, blank=True)
    longitudine = models.FloatField(null=True, blank=True)
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    sursa = RichTextField(features=[], null=True,
                          blank=True, verbose_name='Sursa')
    panels = [
        SnippetChooserPanel('localitate'),
        FieldPanel('adresa'),
        FieldPanel('latitudine'),
        FieldPanel('longitudine'),
        FieldPanel('observatii'),
        FieldPanel('sursa'),
    ]

    class Meta:
        abstract = True

    def __str__(self):
        # localitate is nullable; str() yields 'None' rather than raising.
        return str(self.localitate)
class MutariBiserica(Orderable, MutareBiserica):
    """Concrete, orderable MutareBiserica rows attached to an IstoricPage."""
    page = ParentalKey(
        'IstoricPage', on_delete=models.CASCADE, related_name='mutari')
class PovesteBiserica(models.Model):
    """Abstract story/legend about a church: rich-text body plus its source."""
    observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    sursa = RichTextField(features=[], null=True,
                          blank=True, verbose_name='Sursa')
    panels = [
        FieldPanel('observatii'),
        FieldPanel('sursa'),
    ]

    class Meta:
        abstract = True

    def __str__(self):
        # sursa is nullable; __str__ must always return a str — returning None
        # raises TypeError (e.g. in the Wagtail admin listing).
        return self.sursa or ''
class PovestiBiserica(Orderable, PovesteBiserica):
    """Concrete, orderable PovesteBiserica rows attached to an IstoricPage."""
    page = ParentalKey(
        'IstoricPage', on_delete=models.CASCADE, related_name='povesti')
class PozePisanie(Orderable, Poza):
    """Orderable photos of the pisanie (dedication inscription) on an IstoricPage."""
    page = ParentalKey(
        'IstoricPage', on_delete=models.CASCADE, related_name='poze_pisanie')
class IstoricPage(Page):
    """History tab for a church: dating, dendrochronology study, pisanie,
    people (founders/craftsmen/painters/personalities), events, relocations
    and stories, edited through a tabbed admin interface.

    save() denormalizes several flags and name lists from related objects so
    they can be filtered/displayed without extra joins.
    """
    sursa_datare = ParentalManyToManyField(
        'nomenclatoare.SursaDatare', related_name='p_biserici', blank=True)
    an_constructie = models.IntegerField(null=True, blank=True)
    datare_prin_interval_timp = models.CharField(
        max_length=50, null=True, blank=True)
    datare_secol = models.ForeignKey('nomenclatoare.Secol', null=True,
                                     blank=True, on_delete=models.SET_NULL, related_name='p_biserici')
    datare_secol_observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    datare_secol_sursa = RichTextField(
        features=[], null=True, blank=True, verbose_name='Sursa')
    studiu_dendocronologic_fisier = models.ForeignKey(
        'wagtaildocs.Document', null=True, blank=True, on_delete=models.SET_NULL, related_name='+', verbose_name='Fișier')
    studiu_dendocronologic_autor = models.CharField(
        max_length=150, null=True, blank=True, verbose_name='Autor')
    studiu_dendocronologic_an = models.IntegerField(
        null=True, blank=True, verbose_name='An')
    studiu_dendocronologic_observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    pisanie_traducere = RichTextField(
        features=[], null=True, blank=True, verbose_name='Traducere')
    pisanie_secol_observatii = RichTextField(
        features=[], null=True, blank=True, verbose_name='Observații')
    pisanie_secol_sursa = RichTextField(
        features=[], null=True, blank=True, verbose_name='Sursa')
    # Denormalized flags, refreshed automatically in save().
    are_pisanie = models.BooleanField(default=False, verbose_name='Pisanie')
    are_studiu_dendro = models.BooleanField(default=False, verbose_name='Studiu Dendrocronologic')
    are_mutari = models.BooleanField(default=False, verbose_name='Mutări')
    # Denormalized name lists, refreshed automatically in save().
    lista_ctitori = ArrayField(
        models.CharField(max_length=100, blank=True),
        size=20,
        null=True,
        blank=True,
        verbose_name='Ctitori'
    )
    lista_mesteri = ArrayField(
        models.CharField(max_length=100, blank=True),
        size=20,
        null=True,
        blank=True,
        verbose_name='Meșteri'
    )
    lista_zugravi = ArrayField(
        models.CharField(max_length=100, blank=True),
        size=20,
        null=True,
        blank=True,
        verbose_name='Zugravi'
    )
    lista_personalitati = ArrayField(
        models.CharField(max_length=100, blank=True),
        size=20,
        null=True,
        blank=True,
        verbose_name='Personalități'
    )
    istoric_panels = [
        MultiFieldPanel([
            FieldPanel('sursa_datare', widget=forms.CheckboxSelectMultiple),
            FieldPanel('an_constructie'),
            FieldPanel('datare_prin_interval_timp'),
            FieldPanel('datare_secol'),
            FieldPanel('datare_secol_observatii'),
            FieldPanel('datare_secol_sursa'),
        ],
            heading="Datare",
            classname="collapsible collapsed ",
        ),
        MultiFieldPanel([
            DocumentChooserPanel('studiu_dendocronologic_fisier'),
            FieldPanel('studiu_dendocronologic_autor'),
            FieldPanel('studiu_dendocronologic_an'),
            FieldPanel('studiu_dendocronologic_observatii')
        ],
            heading="Studiu dendrocronologic",
            classname="collapsible collapsed ",
        ),
        MultiFieldPanel([
            FieldPanel('pisanie_traducere'),
            FieldPanel('pisanie_secol_observatii'),
            FieldPanel('pisanie_secol_sursa'),
            InlinePanel("poze_pisanie", label="Poză")
        ],
            heading="Pisanie",
            classname="collapsible collapsed ",
        ),
    ]
    ctitori_panels = [
        InlinePanel('ctitori', label='ctitori')
    ]
    mesteri_panels = [
        InlinePanel('mesteri', label='mesteri')
    ]
    zugravi_panels = [
        InlinePanel('zugravi', label='zugravi')
    ]
    personalitati_panels = [
        InlinePanel('personalitati', label='personalitati')
    ]
    evenimente_panels = [
        InlinePanel('evenimente', label='evenimente')
    ]
    mutari_panels = [
        InlinePanel('mutari', label='mutari')
    ]
    povesti_panels = [
        InlinePanel('povesti', label='povesti')
    ]
    edit_handler = TabbedInterface(
        [
            ObjectList(istoric_panels, heading='Istoric'),
            ObjectList(ctitori_panels, heading='Ctitori'),
            ObjectList(mesteri_panels, heading='Mesteri'),
            ObjectList(zugravi_panels, heading='Zugravi'),
            ObjectList(personalitati_panels, heading='Personalitati'),
            ObjectList(evenimente_panels, heading='Evenimente'),
            ObjectList(mutari_panels, heading='Mutari'),
            ObjectList(povesti_panels, heading='Povesti'),
        ])

    class Meta:  # noqa
        verbose_name = "Istoric"
        verbose_name_plural = "Istoric"

    def save(self, *args, **kwargs):
        """Refresh the denormalized flags and name lists, then save.

        Requires the instance to already have a primary key, since the
        related managers (mutari/ctitori/...) are queried.
        """
        # NOTE(review): parent-page sync deliberately disabled — confirm
        # before deleting for good:
        # biserica = self.get_parent().specific
        # biserica.datare_secol = self.datare_secol
        # biserica.datare_prin_interval_timp = self.datare_prin_interval_timp
        # biserica.datare_an = self.an_constructie
        # biserica.save_revision()
        self.are_pisanie = bool(self.pisanie_traducere)
        self.are_studiu_dendro = bool(self.studiu_dendocronologic_fisier)
        # exists() avoids fetching every related row just to test emptiness.
        self.are_mutari = self.mutari.exists()
        self.lista_ctitori = [x.nume for x in self.ctitori.all()]
        self.lista_mesteri = [x.nume for x in self.mesteri.all()]
        self.lista_zugravi = [x.nume for x in self.zugravi.all()]
        self.lista_personalitati = [x.nume for x in self.personalitati.all()]
        return super().save(*args, **kwargs)
class ValoarePage(Page):
total = models.FloatField(null=True, blank=True)
vechime = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Printr-un algorim definit se va da automat o notă de la 1-5 în funcție de vechimea monumentului si a picturii descrise conform OMCC2682/2003 ETC", verbose_name='Clasa')
vechime_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
integritate = models.IntegerField(choices=CLASE_EVALUARE, null=True,
blank=True, help_text="Integritate / Autenticitate", verbose_name='Clasa')
integritate_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
unicitate = models.IntegerField(choices=CLASE_EVALUARE, null=True,
blank=True, help_text="Unicitate / raritate", verbose_name='Clasa')
unicitate_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
valoare_memoriala = models.IntegerField(
choices=CLASE_EVALUARE, null=True, blank=True, help_text="evenimente, personalități", verbose_name='Clasa')
valoare_memoriala_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
peisaj_cultural = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Parte definitorie a peisajului cultural al zonei", verbose_name='Clasa')
peisaj_cultural_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
valoare_sit = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Valoarea sitului împreună cu toate componentele ansamblului din care face parte, ținând cont de integritate, autenticitate, estetică peisageră, biodiversitate, etc. SUBIECTIV", verbose_name='Clasa')
valoare_sit_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
estetica = models.IntegerField(choices=CLASE_EVALUARE, null=True,
blank=True, help_text="Estetică / Arhitectură", verbose_name='Clasa')
estetica_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
mestesug = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Meșteșug (calitatea muncii - a se vedea golurile dintre lemne (dintre bârne în general dar în special la așezarea elementelor orizontale peste cele verticale))", verbose_name='Clasa')
mestesug_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
pictura = models.IntegerField(
choices=CLASE_EVALUARE, null=True, blank=True, verbose_name='Clasa')
pictura_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
folosinta_actuala = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Folosință actuală / singura biserică din sat / loc al patrimoniului imaterial", verbose_name='Clasa')
folosinta_actuala_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
relevanta_actuala = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Relevanța actuală pentru comunitatea locală (prin reprezentanții săi: preot, crâsnic, învățător, familii de bază)", verbose_name='Clasa')
relevanta_actuala_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
potential = models.IntegerField(choices=CLASE_EVALUARE, null=True, blank=True,
help_text="Potențialul de beneficii aduse comunității locale", verbose_name='Clasa')
potential_observatii = RichTextField(
features=[], null=True, blank=True, verbose_name='Observații')
valoare_panels = [
MultiFieldPanel(
[
FieldPanel('vechime'),
FieldPanel('vechime_observatii'),
],
heading="Vechime",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('integritate'),
FieldPanel('integritate_observatii'),
],
heading="Integritate / Autenticitate",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('unicitate'),
FieldPanel('unicitate_observatii'),
],
heading="Unicitate",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('valoare_memoriala'),
FieldPanel('valoare_memoriala_observatii'),
],
heading="Valoare memorială",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('peisaj_cultural'),
FieldPanel('peisaj_cultural_observatii'),
],
heading="Valoarea peisajului cultural",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('valoare_sit'),
FieldPanel('valoare_sit_observatii'),
],
heading="Valoarea sitului",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('estetica'),
FieldPanel('estetica_observatii'),
],
heading="Valoarea estetică",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('mestesug'),
FieldPanel('mestesug_observatii'),
],
heading="Valoarea meșteșugului",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('pictura'),
FieldPanel('pictura_observatii'),
],
heading="Valoarea componentei artistice",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('folosinta_actuala'),
FieldPanel('folosinta_actuala_observatii'),
],
heading="Folosința actuală",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('relevanta_actuala'),
FieldPanel('relevanta_actuala_observatii'),
],
heading="Relevanța actuală pentru comunitate",
classname="collapsible collapsed ",
),
MultiFieldPanel(
[
FieldPanel('potential'),
FieldPanel('potential_observatii'),
],
heading="Potențial",
classname="collapsible collapsed ",
),
]
edit_handler = TabbedInterface(
[
ObjectList(valoare_panels, heading='General'),
])
class Meta: # noqa
verbose_name = "Valoare"
verbose_name_plural = "Valoare"
def save(self, *args, **kwargs):
nota_valoare = 0
active_fields = 0
fields = [
"vechime",
"integritate",
"unicitate",
"valoare_memoriala",
"peisaj_cultural",
"valoare_sit",
"estetica",
"mestesug",
"pictura",
"folosinta_actuala",
"relevanta_actuala",
"potential",
]
important_fields = [
"vechime",
"integritate",
"unicitate",
"folosinta_actuala",
"relevanta_actuala",
"potential",
]
for field in fields:
field_value = getattr(self, field)
if field_value:
if field in important_fields:
nota_valoare += 2 * field_value
active_fields += 2
else:
nota_valoare += | |
"error": "ID {} already exists in status database".format(status["source_id"])
}
try:
table.put_item(Item=status, ConditionExpression=Attr("source_id").not_exists())
except Exception as e:
return {
"success": False,
"error": repr(e)
}
else:
logger.info("Status for {}: Created".format(status["source_id"]))
return {
"success": True,
"status": status
}
def update_status(source_id, step, code, text=None, link=None, except_on_fail=False):
    """Update the status of a given submission.

    Arguments:
    source_id (str): The source_id of the submission.
    step (str or int): The step of the process to update. Accepts a 1-based
            step number (or numeric string) or a step name from STATUS_STEPS.
    code (char): The applicable status code character.
    text (str): The message or error text. Only used if required for the code. Default None.
    link (str): The link to add. Only used if required for the code. Default None.
    except_on_fail (bool): If True, will raise an Exception if the status cannot be updated.
                           If False, will return a dict as normal, with success=False.

    Returns:
    dict: success (bool): Success state
          error (str): The error. Only exists if success is False.
          status (str): The updated status. Only exists if success is True.
    """
    # Clean text and link (if present)
    if text:
        # Replace the common literal escape sequences with spaces, drop any
        # remaining backslashes, then condense runs of spaces. Removing
        # newlines is acceptable here (short status messages only).
        text = text.replace("\\n", " ").replace("\\t", " ").replace("\\r", " ").replace("\\", "")
        while "  " in text:
            text = text.replace("  ", " ")
    if link:
        link = urllib.parse.quote(link, safe="/:?=")
    # Get status table
    tbl_res = old_get_dmo_table("status")
    if not tbl_res["success"]:
        if except_on_fail:
            raise ValueError(tbl_res["error"])
        return tbl_res
    table = tbl_res["table"]
    # Get old status
    old_status = old_read_table("status", source_id)
    if not old_status["success"]:
        if except_on_fail:
            raise ValueError(old_status["error"])
        return old_status
    status = old_status["status"]
    # Resolve step -> 0-based index. STATUS_STEPS stays a list (not a dict)
    # because translate_status needs slicing over it.
    try:
        step_index = int(step) - 1
    except ValueError:
        step_index = None
    for i, s in enumerate(STATUS_STEPS):
        if step == s[0]:
            step_index = i
            break
    # Fail cleanly on an unknown step instead of crashing with a TypeError
    # (None index) or silently touching the wrong step (negative/out-of-range
    # index) further down.
    if step_index is None or not 0 <= step_index < len(status["code"]):
        error = "Invalid status step: '{}'".format(step)
        if except_on_fail:
            raise ValueError(error)
        return {
            "success": False,
            "error": error
        }
    code_list = list(status["code"])
    code_list[step_index] = code
    # If needed, update messages or errors and cancel tasks
    if code == 'M':
        status["messages"][step_index] = (text or "No message available")
    elif code == 'L':
        status["messages"][step_index] = [
            text or "No message available",
            link or "No link available"
        ]
    elif code == 'F':
        status["messages"][step_index] = (text or "An error occurred and we're trying to fix it")
        # Cancel subsequent tasks
        code_list = code_list[:step_index+1] + ["X"]*len(code_list[step_index+1:])
    elif code == 'H':
        status["messages"][step_index] = [text or "An error occurred and we're trying to fix it",
                                          link or "Help may be available soon."]
        # Cancel subsequent tasks
        code_list = code_list[:step_index+1] + ["X"]*len(code_list[step_index+1:])
    elif code == 'R':
        status["messages"][step_index] = (text or "An error occurred but we're recovering")
    elif code == 'T':
        status["messages"][step_index] = (text or "Retrying")
    status["code"] = "".join(code_list)
    status_valid = validate_status(status)
    if not status_valid["success"]:
        if except_on_fail:
            raise ValueError(status_valid["error"])
        return status_valid
    try:
        # put_item will overwrite the existing entry
        table.put_item(Item=status)
    except Exception as e:
        if except_on_fail:
            raise
        else:
            return {
                "success": False,
                "error": repr(e)
            }
    else:
        logger.info("[{}]{}: {}: {}, {}, {}".format(status["pid"], source_id, step, code,
                                                    text, link))
        return {
            "success": True,
            "status": status
        }
def modify_status_entry(source_id, modifications, except_on_fail=False):
    """Change the status entry of a given submission.

    A generalized (and more powerful) version of update_status; use with care,
    as most fields in the status DB should never change.

    Arguments:
    source_id (str): The source_id of the submission.
    modifications (dict): The keys and values to update.
    except_on_fail (bool): If True, raise when the status cannot be updated;
                           otherwise return a dict with success=False.

    Returns:
    dict: success (bool): Success state
          error (str): The error. Only exists if success is False.
          status (str): The updated status. Only exists if success is True.
    """
    table_info = old_get_dmo_table("status")
    if not table_info["success"]:
        if except_on_fail:
            raise ValueError(table_info["error"])
        return table_info
    status_table = table_info["table"]
    # Fetch the current entry; bail out if it cannot be read.
    current = old_read_table("status", source_id)
    if not current["success"]:
        if except_on_fail:
            raise ValueError(current["error"])
        return current
    # Overlay the requested modifications onto the stored status.
    merged = mdf_toolbox.dict_merge(modifications, current["status"])
    validation = validate_status(merged)
    if not validation["success"]:
        if except_on_fail:
            raise ValueError(validation["error"])
        return validation
    try:
        # put_item will overwrite
        status_table.put_item(Item=merged)
    except Exception as exc:
        if except_on_fail:
            raise
        return {
            "success": False,
            "error": repr(exc)
        }
    logger.info("[{}]{}: Modified: '{}'".format(merged["pid"], source_id, modifications))
    return {
        "success": True,
        "status": merged
    }
def translate_status(status):
    """Render a raw status entry into human- and web-readable summaries.

    status layout:
        source_id: str
        code: str, one status character per step
        messages: list, indexed by step (str, or [text, link] for L/H codes)
        title / submitter / submission_time / test / active /
        original_submission: passed through unchanged
    """
    messages = status["messages"]
    step_names = [st[1] for st in STATUS_STEPS]
    usr_msg = ("Status of {}submission {} ({})\n"
               "Submitted by {} at {}\n\n").format("TEST " if status["test"] else "",
                                                   status["source_id"],
                                                   status["title"],
                                                   status["submitter"],
                                                   status["submission_time"])
    web_msg = []
    # Codes whose message is a plain string (or absent): signal + template.
    plain_codes = {
        'S': ("success", "{step} was successful."),
        'M': ("success", "{step} was successful: {msg}."),
        'F': ("failure", "{step} failed: {msg}."),
        'R': ("failure", "{step} failed (processing will continue): {msg}."),
        'N': ("idle", "{step} was not requested or required."),
        'P': ("started", "{step} is in progress."),
        'T': ("started", "{step} is retrying due to an error: {msg}"),
        'X': ("idle", "{step} was cancelled."),
        'z': ("idle", "{step} has not started yet."),
    }
    # Codes whose message is a [text, link] pair.
    linked_codes = {
        'L': ("success", "was successful"),
        'H': ("failure", "failed"),
    }
    for index, (code, step) in enumerate(zip(status["code"], step_names)):
        link = None
        if code in linked_codes:
            signal, verb = linked_codes[code]
            pair = messages[index]
            entry_text = "{} {}: {}.".format(step, verb, pair[0])
            link = pair[1]
        elif code in plain_codes:
            signal, template = plain_codes[code]
            # Only touch messages[index] when the template actually needs it.
            if "{msg}" in template:
                entry_text = template.format(step=step, msg=messages[index])
            else:
                entry_text = template.format(step=step)
        else:
            signal = "warning"
            entry_text = "{} is unknown. Code: '{}', message: '{}'".format(step, code, messages[index])
        if link is None:
            usr_msg += entry_text + "\n"
            web_msg.append({
                "signal": signal,
                "text": entry_text
            })
        else:
            usr_msg += entry_text + " Link: {}\n".format(link)
            web_msg.append({
                "signal": signal,
                "text": entry_text,
                "link": link
            })
    return {
        "source_id": status["source_id"],
        "status_message": usr_msg,
        "status_list": web_msg,
        "status_code": status["code"],
        "title": status["title"],
        "submitter": status["submitter"],
        "submission_time": status["submission_time"],
        "test": status["test"],
        "active": status["active"],
        "original_submission": status["original_submission"]
    }
def create_curation_task(task):
    """Insert a new curation task; fails if its source_id is already present.

    Returns {"success": True, "curation_task": task} on success, otherwise
    {"success": False, "error": ...}.
    """
    table_info = old_get_dmo_table("curation")
    if not table_info["success"]:
        return table_info
    curation_table = table_info["table"]
    # Reject duplicates up front (the ConditionExpression below also guards
    # against a concurrent insert).
    if old_read_table("curation", task["source_id"])["success"]:
        return {
            "success": False,
            "error": "ID {} already exists in curation database".format(task["source_id"])
        }
    try:
        curation_table.put_item(Item=task, ConditionExpression=Attr("source_id").not_exists())
    except Exception as exc:
        return {
            "success": False,
            "error": repr(exc)
        }
    logger.info("Curation task for {}: Created".format(task["source_id"]))
    return {
        "success": True,
        "curation_task": task
    }
def delete_from_table(table_name, source_id):
    """Deprecated stub; the live implementation moved to api_utils."""
    raise NotImplementedError("Calling deprecated version")
def old_delete_from_table(table_name, source_id):
# For compatibility with legacy utils in this file
tbl_res = old_get_dmo_table(table_name)
if not tbl_res["success"]:
return tbl_res
table = tbl_res["table"]
# Check that entry exists
if not old_read_table(table_name, source_id)["success"]:
return {
"success": False,
"error": "ID {} does not exist in database".format(source_id)
}
try:
table.delete_item(Key={"source_id": source_id})
except Exception as e:
return {
"success": False,
"error": repr(e)
}
# Verify entry deleted
| |
<filename>text_renderer/api.py
import math
import os
import random
import uuid
import glob
import pygame
from pygame import freetype
import numpy as np
from scipy import ndimage
import cv2
from PIL import Image
from tqdm import tqdm
from .font import FontState, ColorState, BaselineState, BorderState, AffineTransformState, PerspectiveTransformState, SurfaceDistortionState, DistortionState
# Directory containing this module; used to locate bundled data files.
this_dir, _ = os.path.split(__file__)

# Blend-mode identifiers understood by grey_blit() / FillImageState.
MJBLEND_NORMAL = "normal"
MJBLEND_ADD = "add"
MJBLEND_SUB = "subtract"
MJBLEND_MULT = "multiply"
MJBLEND_MULTINV = "multiplyinv"
MJBLEND_SCREEN = "screen"
MJBLEND_DIVIDE = "divide"
MJBLEND_MIN = "min"
MJBLEND_MAX = "max"

# Module-level pygame initialisation and shared sampling-state singletons
# (import side effects; the render functions below read these globals).
pygame.init()
fontstate = FontState()
baselinestate = BaselineState()
affinestate = AffineTransformState()
perspectivestate = PerspectiveTransformState()
diststate = DistortionState()
surfdiststate = SurfaceDistortionState()
def global_distortions(arr):
    """Apply sampled whole-image distortions (noise, blur, sharpen, resample).

    Parameters come from the module-level ``diststate`` sampler; values are
    clamped to [0, 255]. See
    http://scipy-lectures.github.io/advanced/image_processing/#image-filtering
    """
    params = diststate.get_sample()
    blur_sigma = params['blur']
    # additive gaussian noise, clamped to the valid intensity range
    distorted = np.minimum(np.maximum(0, arr + np.random.normal(0, params['noise'], arr.shape)), 255)
    if blur_sigma > 0.1:
        distorted = ndimage.gaussian_filter(distorted, blur_sigma)
    if params['sharpen']:
        # unsharp-mask style sharpening against a blur of the *original* array
        low_pass = ndimage.gaussian_filter(arr, blur_sigma / 2)
        distorted = distorted + params['sharpen_amount'] * (distorted - low_pass)
    if params['resample']:
        # downsample then restore the original height to simulate rescaling loss
        original_height = distorted.shape[0]
        distorted = resize_image(distorted, newh=params['resample_height'])
        distorted = resize_image(distorted, newh=original_height)
    return distorted
def surface_distortions(arr):
    # Apply sampled per-surface distortions (noise + blur, optional sharpen)
    # to a grey+alpha array: arr[...,0] = grey, arr[...,1] = alpha.
    ds = surfdiststate.get_sample()
    blur = ds['blur']  # blur from the FIRST sample; reused for sharpening below
    origarr = arr.copy()
    arr = np.minimum(np.maximum(0, arr + np.random.normal(0, ds['noise'], arr.shape)), 255)
    # make some changes to the alpha
    arr[...,1] = ndimage.gaussian_filter(arr[...,1], ds['blur'])
    # NOTE(review): a second, independent sample drives the grey channel —
    # presumably intentional so grey and alpha get different blurs; confirm.
    ds = surfdiststate.get_sample()
    arr[...,0] = ndimage.gaussian_filter(arr[...,0], ds['blur'])
    if ds['sharpen']:
        # unsharp mask against a blur of the ORIGINAL grey channel,
        # using the first sample's blur amount
        newarr_ = ndimage.gaussian_filter(origarr[...,0], blur/2)
        arr[...,0] = arr[...,0] + ds['sharpen_amount']*(arr[...,0] - newarr_)
    return arr
class FillImageState(object):
    """
    Handles the images used for filling the background, foreground, and border surfaces.
    """
    blend_amount = [0.0, 0.25]  # normal dist mean, std
    blend_modes = [MJBLEND_NORMAL, MJBLEND_ADD, MJBLEND_MULTINV, MJBLEND_SCREEN, MJBLEND_MAX]
    blend_order = 0.5  # probability that the fill image is blended on top
    min_textheight = 16.0  # minimum pixel height that you would find text in an image

    def __init__(self, data_dir=f'{this_dir}/data/fill'):
        """Index the fill-image files found in data_dir."""
        self.data_dir = data_dir
        self.im_list = os.listdir(data_dir)

    def get_sample(self, surfarr):
        """Sample a fill-image patch matching surfarr's size, plus blend params.

        The image sample returned should not have its aspect ratio changed, as
        this would never happen in the real world. It can still be resized.
        Returns a dict with 'image', 'blend_mode', 'blend_amount', 'blend_order'.
        """
        # load image
        imfn = os.path.join(self.data_dir, random.choice(self.im_list))
        baseim = np.array(Image.open(imfn))
        # choose a colour channel or rgb2gray
        if baseim.ndim == 3:
            if np.random.rand() < 0.25:
                baseim = rgb2gray(baseim)
            else:
                baseim = baseim[..., np.random.randint(0, 3)]
        else:
            assert(baseim.ndim == 2)
        imsz = baseim.shape
        surfsz = surfarr.shape
        # don't resize bigger than if at the original size, the text was less than min_textheight
        max_factor = float(surfsz[0]) / self.min_textheight
        # don't resize smaller than it is smaller than a dimension of the surface
        min_factor = max(float(surfsz[0] + 5) / float(imsz[0]), float(surfsz[1] + 5) / float(imsz[1]))
        # sample a resize factor
        factor = max(min_factor, min(max_factor, ((max_factor - min_factor) / 1.5) * np.random.randn() + max_factor))
        sampleim = resize_image(baseim, factor)
        imsz = sampleim.shape
        # sample an image patch
        # BUG FIX: x/y used to be unbound (NameError below) if every randint
        # attempt raised ValueError before the retry limit; default to the
        # top-left corner instead.
        x = y = 0
        good = False
        curs = 0
        while not good:
            curs += 1
            if curs > 1000:
                print("difficulty getting sample")
                break
            try:
                x = np.random.randint(0, imsz[1] - surfsz[1])
                y = np.random.randint(0, imsz[0] - surfsz[0])
                good = True
            except ValueError:
                # resample factor
                factor = max(min_factor, min(max_factor, ((max_factor - min_factor) / 1.5) * np.random.randn() + max_factor))
                sampleim = resize_image(baseim, factor)
                imsz = sampleim.shape
        # build a grey+alpha sample: grey from the patch, alpha copied from surfarr
        imsample = (np.zeros(surfsz) + 255).astype(surfarr.dtype)
        imsample[..., 0] = sampleim[y:y + surfsz[0], x:x + surfsz[1]]
        imsample[..., 1] = surfarr[..., 1].copy()
        return {
            'image': imsample,
            'blend_mode': random.choice(self.blend_modes),
            'blend_amount': min(1.0, np.abs(self.blend_amount[1] * np.random.randn() + self.blend_amount[0])),
            'blend_order': np.random.rand() < self.blend_order,
        }
def rgb2gray(rgb):
    """RGB -> grey-scale using Matlab's rgb2grey weights.

    3-channel input gets the weighted sum; a 3-D array with fewer than three
    channels falls back to its first channel; 2-D input is returned as-is.
    """
    if rgb.ndim == 3 and rgb.shape[2] >= 3:
        return 0.2989 * rgb[:, :, 0] + 0.5870 * rgb[:, :, 1] + 0.1140 * rgb[:, :, 2]
    if rgb.ndim == 3:
        return rgb[:, :, 0]
    return rgb[:, :]
def resize_image(im, r=None, newh=None, neww=None, filtering=Image.BILINEAR):
    """Resize array im by ratio r, or to height newh / width neww.

    When only newh is given, width is derived to preserve aspect ratio.
    Returns a numpy array with the input dtype.
    """
    dt = im.dtype
    I = Image.fromarray(im)
    if r is not None:
        # explicit ratio overrides newh/neww
        h = im.shape[0]
        w = im.shape[1]
        newh = int(round(r*h))
        neww = int(round(r*w))
    if neww is None:
        neww = int(newh*im.shape[1]/float(im.shape[0]))
    if newh > im.shape[0]:
        # Upscaling ignores the `filtering` argument and always uses ANTIALIAS.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10 (use
        # Image.LANCZOS); confirm the pinned Pillow version before upgrading.
        I = I.resize([neww, newh], Image.ANTIALIAS)
    else:
        # thumbnail() only shrinks, in place, preserving aspect ratio
        I.thumbnail([neww, newh], filtering)
    return np.array(I).astype(dt)
def add_fillimage(arr, fillimstate=None):
    """
    Adds a fill image to the array.

    fillimstate: FillImageState to sample from; a fresh one is built per call
    when omitted. (BUG FIX: the old def-time default ``FillImageState()`` was
    evaluated once at import — sharing one instance and hitting the filesystem
    on module import.)

    For blending this might be useful:
    - http://stackoverflow.com/questions/601776/what-do-the-blend-modes-in-pygame-mean
    - http://stackoverflow.com/questions/5605174/python-pil-function-to-divide-blend-two-images
    """
    if fillimstate is None:
        fillimstate = FillImageState()
    fis = fillimstate.get_sample(arr)
    image = fis['image']
    blend_mode = fis['blend_mode']
    blend_amount = fis['blend_amount']
    blend_order = fis['blend_order']
    # scale the alpha of whichever layer ends up on top, then composite
    if blend_amount > 0:
        if blend_order:
            image = image.astype(np.float64)
            image[..., 1] *= blend_amount
            arr = grey_blit(image, arr, blend_mode=blend_mode)
        else:
            arr = arr.astype(np.float64)
            arr[..., 1] *= (1 - blend_amount)
            arr = grey_blit(arr, image, blend_mode=blend_mode)
    return arr
def grey_blit(src, dst, blend_mode=MJBLEND_NORMAL):
    """
    Composite two grey+alpha images (channel 0 = grey, channel 1 = alpha).

    src is blended over dst using premultiplied-alpha compositing with the
    selected MJBLEND_* mode; returns an array with dst's original dtype.

    References:
    - http://stackoverflow.com/a/3375291/190597
    - http://stackoverflow.com/a/9166671/190597
    - http://stackoverflow.com/questions/1613600/direct3d-rendering-2d-images-with-multiply-blending-mode-and-alpha
    - http://www.linuxtopia.org/online_books/graphics_tools/gimp_advanced_guide/gimp_guide_node55.html
    """
    dt = dst.dtype
    src = src.astype(np.single)
    dst = dst.astype(np.single)
    out = np.empty(src.shape, dtype='float')
    alpha = np.index_exp[:, :, 1]
    rgb = np.index_exp[:, :, 0]
    src_a = src[alpha] / 255.0
    dst_a = dst[alpha] / 255.0
    out[alpha] = src_a + dst_a * (1 - src_a)
    # 0/0 where both alphas are 0 yields NaN; silenced here, zeroed by the
    # final astype (see comment below).
    old_setting = np.seterr(invalid='ignore')
    src_pre = src[rgb] * src_a
    dst_pre = dst[rgb] * dst_a
    # blend functions operate on premultiplied channels; sa_ = (1 - src alpha)
    blendfuncs = {
        MJBLEND_NORMAL: lambda s, d, sa_: s + d*sa_,
        MJBLEND_ADD: lambda s, d, sa_: np.minimum(255, s + d),
        MJBLEND_SUB: lambda s, d, sa_: np.maximum(0, s - d),
        MJBLEND_MULT: lambda s, d, sa_: s*d*sa_ / 255.0,
        MJBLEND_MULTINV: lambda s, d, sa_: (255.0 - s)*d*sa_ / 255.0,
        MJBLEND_SCREEN: lambda s, d, sa_: 255 - (1.0/255.0)*(255.0 - s)*(255.0 - d*sa_),
        MJBLEND_DIVIDE: lambda s, d, sa_: np.minimum(255, d*sa_*256.0 / (s + 1.0)),
        MJBLEND_MIN: lambda s, d, sa_: np.minimum(d*sa_, s),
        MJBLEND_MAX: lambda s, d, sa_: np.maximum(d*sa_, s),
    }
    out[rgb] = blendfuncs[blend_mode](src_pre, dst_pre, (1 - src_a))
    out[rgb] /= out[alpha]  # un-premultiply
    np.seterr(**old_setting)
    out[alpha] *= 255
    # BUG FIX: np.clip returns a new array — the original discarded the
    # result, so out-of-range values were never actually clamped.
    out = np.clip(out, 0, 255)
    # astype('uint8') maps np.nan (and np.inf) to 0
    out = out.astype(dt)
    return out
def imcrop(arr, rect):
    """Crop arr to rect = [x, y, w, h]; extra trailing dims are kept."""
    rows = slice(rect[1], rect[1] + rect[3])
    cols = slice(rect[0], rect[0] + rect[2])
    if arr.ndim > 2:
        return arr[rows, cols, ...]
    return arr[rows, cols]
def get_bb(arr, eq=None):
    """Bounding box [x, y, w, h] of pixels > 0, or == eq when eq is given."""
    mask = (arr > 0) if eq is None else (arr == eq)
    nz = np.nonzero(mask)
    xs, ys = nz[1], nz[0]
    x0, x1 = xs.min(), xs.max()
    y0, y1 = ys.min(), ys.max()
    return [x0, y0, x1 - x0, y1 - y0]
def arr_scroll(arr, dx, dy):
    """Circularly shift arr by dx columns and dy rows (wrap-around)."""
    return np.roll(np.roll(arr, dy, axis=0), dx, axis=1)
def get_bordershadow(bg_arr, colour, borderstate=None):
    """
    Gets a border/shadow with the movement state [top, right, bottom, left].
    Inset or outset is random.

    borderstate: BorderState to sample from; a fresh one is built per call when
    omitted. (BUG FIX: the old def-time default ``BorderState()`` was evaluated
    once at import, sharing a single instance across all calls.)

    Returns a pair of grey+alpha arrays: (border_arr, bg_arr) for an outset
    border, (bg_arr, border_arr) for an inset one.
    """
    if borderstate is None:
        borderstate = BorderState()
    bs = borderstate.get_sample()
    outset = bs['outset']
    width = bs['width']
    position = bs['position']
    # work on a copy, re-coloured to the border colour
    border_arr = bg_arr.copy()
    border_arr[..., 0] = colour
    if outset:
        # dilate black (erode white)
        border_arr[..., 1] = ndimage.grey_dilation(border_arr[..., 1], size=(width, width))
        border_arr = arr_scroll(border_arr, position[0], position[1])
        return border_arr, bg_arr
    else:
        # erode black (dilate white)
        border_arr[..., 1] = ndimage.grey_erosion(border_arr[..., 1], size=(width, width))
        return bg_arr, border_arr
def get_ga_image(surf):
    """Return an (H, W, 2) grey+alpha array for a pygame surface."""
    grey = pygame.surfarray.pixels_red(surf)
    alpha = pygame.surfarray.pixels_alpha(surf)
    # stack along a new last axis, then swap pygame's (x, y) to (row, col)
    stacked = np.stack((grey, alpha.reshape(grey.shape)), axis=2)
    return stacked.swapaxes(0, 1)
def apply_perspective_arr(arr, affstate, a_proj_type, perstate, p_proj_type, filtering=Image.BICUBIC):
    """Apply an affine transform followed by a perspective transform to an image array."""
    img = Image.fromarray(arr)
    # affine first, then perspective, both with the same resampling filter
    for proj_type, state in ((a_proj_type, affstate), (p_proj_type, perstate)):
        img = img.transform(img.size, proj_type, state, filtering)
    return np.array(img)
def gen(text, sz=(800, 200),
color=random.choice(glob.glob(f'{this_dir}/data/fill/*')),
fill=f'{this_dir}/data/fill',
substring_crop=0, random_crop=True):
"""Generate text image from input text
"""
fs = fontstate.get_sample()
# clear background
bg_surf = pygame.Surface((round(2.0 * fs['size'] * len(text)),
sz[1]), pygame.SRCALPHA, 32)
font = freetype.Font(fs['font'], int(fs['size']))
# random params
text = fs['capsmode'](text) if fs['random_caps'] else text
font.underline = fs['underline']
font.underline_adjustment = fs['underline_adjustment']
font.strong = fs['strong']
font.oblique = fs['oblique']
font.strength = fs['strength']
char_spacing = fs['char_spacing']
font.antialiased = True
font.origin = True
colorstate = ColorState(color)
cs = colorstate.get_sample(2 + fs['border'])
mid_idx = int(math.floor(len(text) / 2))
curve = [0 for c in text]
rotations = [0 for c in text]
if fs['curved'] and len(text) > 1:
bs = baselinestate.get_sample()
for i, c in enumerate(text[mid_idx+1:]):
curve[mid_idx+i+1] = bs['curve'](i+1)
rotations[mid_idx+i+1] = -int(math.degrees(math.atan(bs['diff'](i+1)/float(fs['size']/2))))
for i, c in enumerate(reversed(text[:mid_idx])):
curve[mid_idx-i-1] = bs['curve'](-i-1)
rotations[mid_idx-i-1] = -int(math.degrees(math.atan(bs['diff'](-i-1)/float(fs['size']/2))))
mean_curve = sum(curve) / float(len(curve)-1)
curve[mid_idx] = -1 * mean_curve
# render text (centered)
char_bbs = []
# place middle char
rect = font.get_rect(text[mid_idx])
rect.centerx = bg_surf.get_rect().centerx
rect.centery = bg_surf.get_rect().centery + rect.height
rect.centery += curve[mid_idx]
bbrect = font.render_to(bg_surf, rect, text[mid_idx], rotation=rotations[mid_idx])
bbrect.x = rect.x
bbrect.y = rect.y - rect.height
char_bbs.append(bbrect)
# render chars to the right
last_rect = rect
for i, c in enumerate(text[mid_idx+1:]):
char_fact = 1.0
if fs['random_kerning']:
char_fact += fs['random_kerning_amount'] * np.random.randn()
newrect = font.get_rect(c)
newrect.y = last_rect.y
newrect.topleft = (last_rect.topright[0] + char_spacing*char_fact, newrect.topleft[1])
newrect.centery = max(0 + newrect.height*1, min(sz[1] - newrect.height*1, newrect.centery + curve[mid_idx+i+1]))
try:
bbrect = font.render_to(bg_surf, newrect, c, rotation=rotations[mid_idx+i+1])
except ValueError:
bbrect = font.render_to(bg_surf, newrect, c)
bbrect.x = newrect.x
bbrect.y = newrect.y - newrect.height
char_bbs.append(bbrect)
last_rect = newrect
# render chars to the left
last_rect = rect
for i, c in enumerate(reversed(text[:mid_idx])):
char_fact = 1.0
if fs['random_kerning']:
| |
<reponame>tradenity/python-sdk<gh_stars>1-10
# coding: utf-8
"""
Tradenity API
Tradenity eCommerce Rest API
Contact: <EMAIL>
"""
from __future__ import absolute_import
import re
import pprint
# python 2 and python 3 compatibility library
import six
from tradenity.api_client import ApiClient
class ReturnLineItem(object):
    """Swagger-generated model for one line item of a return operation."""

    # Maps each python attribute name to its swagger type string
    # (consumed by to_dict() to decide how to serialise each value).
    swagger_types = {
        'id': 'str',
        'meta': 'InstanceMeta',
        'unit_price': 'int',
        'quantity': 'int',
        'product': 'Product',
        'taxes': 'list[TaxRate]',
        'promotions': 'list[Promotion]',
        'subtotal': 'int',
        'total': 'int',
        'shipping_amount': 'int',
        'tax_amount': 'int',
        'discount_amount': 'int',
        'return_operation': 'ReturnOperation'
    }

    # Maps python attribute names to their JSON field names on the wire.
    attribute_map = {
        'id': 'id',
        'meta': '__meta',
        'unit_price': 'unitPrice',
        'quantity': 'quantity',
        'product': 'product',
        'taxes': 'taxes',
        'promotions': 'promotions',
        'subtotal': 'subtotal',
        'total': 'total',
        'shipping_amount': 'shippingAmount',
        'tax_amount': 'taxAmount',
        'discount_amount': 'discountAmount',
        'return_operation': 'returnOperation'
    }

    # Shared, lazily-created API client (see get_api_client()).
    api_client = None
def __init__(self, id=None, meta=None, unit_price=None, quantity=None, product=None, taxes=None, promotions=None, subtotal=None, total=None, shipping_amount=None, tax_amount=None, discount_amount=None, return_operation=None):
    """ReturnLineItem - a model defined in Swagger"""
    # Every backing field starts at None; values then flow through the
    # property setters so future validation hooks would still apply.
    self._id = id
    for field in ('meta', 'unit_price', 'quantity', 'product', 'taxes',
                  'promotions', 'subtotal', 'total', 'shipping_amount',
                  'tax_amount', 'discount_amount', 'return_operation'):
        setattr(self, '_' + field, None)
    self.discriminator = None
    # Required swagger fields are always assigned (even when None).
    self.unit_price = unit_price
    self.quantity = quantity
    self.product = product
    self.return_operation = return_operation
    # Optional fields are assigned only when a value was supplied.
    for field, value in (('meta', meta), ('taxes', taxes),
                         ('promotions', promotions), ('subtotal', subtotal),
                         ('total', total), ('shipping_amount', shipping_amount),
                         ('tax_amount', tax_amount),
                         ('discount_amount', discount_amount)):
        if value is not None:
            setattr(self, field, value)
@property
def id(self):
    """Return the explicit id, or derive and cache it from meta.href.

    When no id was set, the id is the last segment of the instance URL
    held by the meta object (``meta.href``); returns None when neither
    an id nor meta is available.
    """
    if self._id:
        return self._id
    elif self.meta is None:
        return None
    else:
        # cache the id parsed from the instance URL
        self._id = self.meta.href.split("/")[-1]
        return self._id

@id.setter
def id(self, new_id):
    # setting an explicit id overrides any value derived from meta.href
    self._id = new_id
# --- Swagger-generated accessors: one trivial getter/setter pair per field ---

@property
def meta(self):
    """Gets the meta of this ReturnLineItem.

    :return: The meta of this ReturnLineItem.
    :rtype: InstanceMeta
    """
    return self._meta

@meta.setter
def meta(self, meta):
    """Sets the meta of this ReturnLineItem.

    :param meta: The meta of this ReturnLineItem.
    :type: InstanceMeta
    """
    self._meta = meta

@property
def unit_price(self):
    """Gets the unit_price of this ReturnLineItem.

    :return: The unit_price of this ReturnLineItem (integer amount).
    :rtype: int
    """
    return self._unit_price

@unit_price.setter
def unit_price(self, unit_price):
    """Sets the unit_price of this ReturnLineItem.

    :param unit_price: The unit_price of this ReturnLineItem.
    :type: int
    """
    self._unit_price = unit_price

@property
def quantity(self):
    """Gets the quantity of this ReturnLineItem.

    :return: The quantity of this ReturnLineItem.
    :rtype: int
    """
    return self._quantity

@quantity.setter
def quantity(self, quantity):
    """Sets the quantity of this ReturnLineItem.

    :param quantity: The quantity of this ReturnLineItem.
    :type: int
    """
    self._quantity = quantity

@property
def product(self):
    """Gets the product of this ReturnLineItem.

    :return: The product of this ReturnLineItem.
    :rtype: Product
    """
    return self._product

@product.setter
def product(self, product):
    """Sets the product of this ReturnLineItem.

    :param product: The product of this ReturnLineItem.
    :type: Product
    """
    self._product = product

@property
def taxes(self):
    """Gets the taxes of this ReturnLineItem.

    :return: The taxes of this ReturnLineItem.
    :rtype: list[TaxRate]
    """
    return self._taxes

@taxes.setter
def taxes(self, taxes):
    """Sets the taxes of this ReturnLineItem.

    :param taxes: The taxes of this ReturnLineItem.
    :type: list[TaxRate]
    """
    self._taxes = taxes

@property
def promotions(self):
    """Gets the promotions of this ReturnLineItem.

    :return: The promotions of this ReturnLineItem.
    :rtype: list[Promotion]
    """
    return self._promotions

@promotions.setter
def promotions(self, promotions):
    """Sets the promotions of this ReturnLineItem.

    :param promotions: The promotions of this ReturnLineItem.
    :type: list[Promotion]
    """
    self._promotions = promotions

@property
def subtotal(self):
    """Gets the subtotal of this ReturnLineItem.

    :return: The subtotal of this ReturnLineItem (integer amount).
    :rtype: int
    """
    return self._subtotal

@subtotal.setter
def subtotal(self, subtotal):
    """Sets the subtotal of this ReturnLineItem.

    :param subtotal: The subtotal of this ReturnLineItem.
    :type: int
    """
    self._subtotal = subtotal

@property
def total(self):
    """Gets the total of this ReturnLineItem.

    :return: The total of this ReturnLineItem (integer amount).
    :rtype: int
    """
    return self._total

@total.setter
def total(self, total):
    """Sets the total of this ReturnLineItem.

    :param total: The total of this ReturnLineItem.
    :type: int
    """
    self._total = total

@property
def shipping_amount(self):
    """Gets the shipping_amount of this ReturnLineItem.

    :return: The shipping_amount of this ReturnLineItem (integer amount).
    :rtype: int
    """
    return self._shipping_amount

@shipping_amount.setter
def shipping_amount(self, shipping_amount):
    """Sets the shipping_amount of this ReturnLineItem.

    :param shipping_amount: The shipping_amount of this ReturnLineItem.
    :type: int
    """
    self._shipping_amount = shipping_amount

@property
def tax_amount(self):
    """Gets the tax_amount of this ReturnLineItem.

    :return: The tax_amount of this ReturnLineItem (integer amount).
    :rtype: int
    """
    return self._tax_amount

@tax_amount.setter
def tax_amount(self, tax_amount):
    """Sets the tax_amount of this ReturnLineItem.

    :param tax_amount: The tax_amount of this ReturnLineItem.
    :type: int
    """
    self._tax_amount = tax_amount

@property
def discount_amount(self):
    """Gets the discount_amount of this ReturnLineItem.

    :return: The discount_amount of this ReturnLineItem (integer amount).
    :rtype: int
    """
    return self._discount_amount

@discount_amount.setter
def discount_amount(self, discount_amount):
    """Sets the discount_amount of this ReturnLineItem.

    :param discount_amount: The discount_amount of this ReturnLineItem.
    :type: int
    """
    self._discount_amount = discount_amount

@property
def return_operation(self):
    """Gets the return_operation of this ReturnLineItem.

    :return: The return_operation of this ReturnLineItem.
    :rtype: ReturnOperation
    """
    return self._return_operation

@return_operation.setter
def return_operation(self, return_operation):
    """Sets the return_operation of this ReturnLineItem.

    :param return_operation: The return_operation of this ReturnLineItem.
    :type: ReturnOperation
    """
    self._return_operation = return_operation
def to_dict(self):
    """Returns the model properties as a dict.

    Nested models (anything with a ``to_dict``) are serialised recursively,
    including inside lists and dict values.
    """
    result = {}
    # Only the attribute names are needed here; the original iterated
    # six.iteritems(...) and discarded every value (ruff PERF102) —
    # iterating the dict's keys directly is equivalent and simpler.
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                x.to_dict() if hasattr(x, "to_dict") else x for x in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                k: v.to_dict() if hasattr(v, "to_dict") else v
                for k, v in value.items()
            }
        else:
            result[attr] = value
    # mirror swagger-codegen behaviour for dict-derived models
    if issubclass(ReturnLineItem, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Returns the pretty-printed string representation of the model."""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """For `print` and `pprint` — delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Two line items are equal when both are ReturnLineItem and share all attribute values."""
    return isinstance(other, ReturnLineItem) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal (negates __eq__; needed on py2)."""
    return not self == other
@classmethod
def get_api_client(cls):
    """Return the shared ApiClient, creating it lazily on first use."""
    if cls.api_client is None:
        cls.api_client = ApiClient.instance()
    return cls.api_client

@classmethod
def find_all(cls, **kwargs):
    """Return every ReturnLineItem (alias of list_all_return_line_items)."""
    return cls.list_all_return_line_items(**kwargs)

@classmethod
def find_all_by(cls, **kwargs):
    """Return all items matching the given filter kwargs."""
    return cls.list_all_return_line_items(**kwargs)

@classmethod
def find_one_by(cls, **kwargs):
    """Return the first item matching the filter, or None when nothing matches."""
    results = cls.list_all_return_line_items(**kwargs)
    if len(results) > 0:
        return results[0]

@classmethod
def find_by_id(cls, id):
    """Fetch a single ReturnLineItem by its id."""
    return cls.get_return_line_item_by_id(id)

def create(self):
    """Persist this instance, adopt the server-assigned id, and return self."""
    new_instance = self.create_return_line_item(self)
    self.id = new_instance.id
    return self

def update(self):
    """Push this instance's current state to the server."""
    return self.update_return_line_item_by_id(self.id, self)

def delete(self):
    """Delete this instance on the server by its id."""
    return self.delete_return_line_item_by_id(self.id)
@classmethod
def create_return_line_item(cls, return_line_item, **kwargs):
    """Create ReturnLineItem

    Create a new ReturnLineItem.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.create_return_line_item(return_line_item, async=True)
    >>> result = thread.get()
    :param async bool
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required)
    :return: ReturnLineItem
    If the method is called asynchronously,
    returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The async and sync paths both return the helper's result unchanged
    # (a thread when async, the deserialised data otherwise), so a single
    # return covers both.
    return cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
@classmethod
def _create_return_line_item_with_http_info(cls, return_line_item, **kwargs):
    """Create ReturnLineItem

    Create a new ReturnLineItem.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.create_return_line_item_with_http_info(return_line_item, async=True)
    >>> result = thread.get()
    :param async bool
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required)
    :return: ReturnLineItem
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['return_line_item']
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    query_params = []
    params = locals()
    # unknown kwargs become query parameters; known ones go into params
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            query_params.append((key, val))
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'return_line_item' is set
    if ('return_line_item' not in params or
            params['return_line_item'] is None):
        raise ValueError("Missing the required parameter `return_line_item` when calling `create_return_line_item`")
    collection_formats = {}
    path_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'return_line_item' in params:
        body_params = params['return_line_item']
    # HTTP header `Accept`
    header_params['Accept'] = cls.get_api_client().select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
        ['application/json'])
    # Authentication setting
    auth_settings = []
    # BUG FIX: ``async`` became a reserved keyword in Python 3.7, so
    # ``async=params.get('async')`` is a SyntaxError there; passing the
    # argument through ** expansion keeps the same call on py2 and py3.
    return cls.get_api_client().call_api(
        '/returnLineItems', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ReturnLineItem',
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
@classmethod
def delete_return_line_item_by_id(cls, return_line_item_id, **kwargs):
"""Delete ReturnLineItem
Delete an instance of ReturnLineItem by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_return_line_item_by_id(return_line_item_id, async=True)
>>> result = thread.get()
:param async bool
:param str return_line_item_id: | |
<filename>tests/test_psygnal.py
import gc
import time
import weakref
from functools import partial, wraps
from inspect import Signature
from types import FunctionType
from typing import Optional
from unittest.mock import MagicMock, call
import pytest
from psygnal import Signal, SignalInstance
from psygnal._signal import _get_method_name
def stupid_decorator(fun):
    """Deliberately bad wrapper: copies annotations but clobbers __name__."""
    def inner(*args):
        fun(*args)

    inner.__annotations__ = fun.__annotations__
    inner.__name__ = "f_no_arg"
    return inner
def good_decorator(fun):
    """Well-behaved wrapper: functools.wraps preserves the metadata."""
    @wraps(fun)
    def inner(*args):
        fun(*args)

    return inner
# fmt: off
class Emitter:
    """Fixture class exposing signals with assorted signatures."""
    no_arg = Signal()
    one_int = Signal(int)
    two_int = Signal(int, int)
    str_int = Signal(str, int)
    # this signal skips both nargs and type validation on connect
    no_check = Signal(str, check_nargs_on_connect=False, check_types_on_connect=False)


class MyObj:
    """Fixture with bound-method slots covering arg/kwarg/vararg shapes."""
    def f_no_arg(self): ...
    def f_str_int_vararg(self, a: str, b: int, *c): ...
    def f_str_int_any(self, a: str, b: int, c): ...
    def f_str_int_kwarg(self, a: str, b: int, c=None): ...
    def f_str_int(self, a: str, b: int): ...
    def f_str_any(self, a: str, b): ...
    def f_str(self, a: str): ...
    def f_int(self, a: int): ...
    def f_any(self, a): ...
    def f_int_int(self, a: int, b: int): ...
    def f_str_str(self, a: str, b: str): ...
    def f_arg_kwarg(self, a, b=None): ...
    def f_vararg(self, *a): ...
    def f_vararg_varkwarg(self, *a, **b): ...
    def f_vararg_kwarg(self, *a, b=None): ...

    @stupid_decorator
    def f_int_decorated_stupid(self, a: int): ...

    @good_decorator
    def f_int_decorated_good(self, a: int): ...

    f_any_assigned = lambda self, a: None  # noqa


# free-function twins of the methods above, for the "func" test mode
def f_no_arg(): ...
def f_str_int_vararg(a: str, b: int, *c): ...
def f_str_int_any(a: str, b: int, c): ...
def f_str_int_kwarg(a: str, b: int, c=None): ...
def f_str_int(a: str, b: int): ...
def f_str_any(a: str, b): ...
def f_str(a: str): ...
def f_int(a: int): ...
def f_any(a): ...
def f_int_int(a: int, b: int): ...
def f_str_str(a: str, b: str): ...
def f_arg_kwarg(a, b=None): ...
def f_vararg(*a): ...
def f_vararg_varkwarg(*a, **b): ...
def f_vararg_kwarg(*a, b=None): ...


class MyReceiver:
    """Fixture asserting Signal.current_emitter()/sender() bookkeeping."""
    expect_signal = None
    expect_sender = None
    expect_name = None

    def assert_sender(self, *a):
        assert Signal.current_emitter() is self.expect_signal
        assert self.expect_name in repr(Signal.current_emitter())
        assert Signal.current_emitter().instance is self.expect_sender
        assert Signal.sender() is self.expect_sender
        assert Signal.current_emitter()._name is self.expect_name

    def assert_not_sender(self, *a):
        # just to make sure we're actually calling it
        assert Signal.current_emitter().instance is not self.expect_sender
# fmt: on
def test_basic_signal():
    """standard Qt usage, as class attribute"""
    emitter = Emitter()
    mock = MagicMock()
    emitter.one_int.connect(mock)
    emitter.one_int.emit(1)
    mock.assert_called_once_with(1)
    mock.reset_mock()
    # calling directly also works
    emitter.one_int(1)
    mock.assert_called_once_with(1)


def test_decorator():
    # connect() works both bare and parametrised as a decorator
    emitter = Emitter()

    @emitter.one_int.connect
    def boom(v: int):
        raise ValueError

    @emitter.one_int.connect(check_nargs=False)
    def bad_cb(a, b, c):
        ...

    # the connected slot's exception propagates out of emit()
    with pytest.raises(ValueError):
        emitter.one_int.emit(1)


def test_misc():
    emitter = Emitter()
    assert isinstance(Emitter.one_int, Signal)
    assert isinstance(emitter.one_int, SignalInstance)
    # NOTE(review): the raises-block is repeated — presumably to check the
    # AttributeError is raised consistently on repeated access; confirm.
    with pytest.raises(AttributeError):
        emitter.one_int.asdf
    with pytest.raises(AttributeError):
        emitter.one_int.asdf


def test_getattr():
    s = Signal()
    with pytest.raises(AttributeError):
        s.not_a_thing


def test_signature_provided():
    # a Signal may be built from an inspect.Signature directly
    s = Signal(Signature())
    assert s.signature == Signature()
    # mixing a Signature with extra positional types warns
    with pytest.warns(UserWarning):
        s = Signal(Signature(), 1)


def test_emit_checks():
    # emit() argument-count/type checking can be toggled per call
    emitter = Emitter()
    emitter.one_int.emit(check_nargs=False)
    emitter.one_int.emit()
    with pytest.raises(TypeError):
        emitter.one_int.emit(check_nargs=True)
    emitter.one_int.emit(1)
    emitter.one_int.emit(1, 2, check_nargs=False)
    emitter.one_int.emit(1, 2)
    with pytest.raises(TypeError):
        emitter.one_int.emit(1, 2, check_nargs=True)
    with pytest.raises(TypeError):
        emitter.one_int.emit("sdr", check_types=True)
    emitter.one_int.emit("sdr", check_types=False)
def test_basic_signal_blocked():
    """standard Qt usage, as class attribute"""
    emitter = Emitter()
    mock = MagicMock()
    emitter.one_int.connect(mock)
    emitter.one_int.emit(1)
    mock.assert_called_once_with(1)
    mock.reset_mock()
    # no calls are delivered while the signal is blocked
    with emitter.one_int.blocked():
        emitter.one_int.emit(1)
    mock.assert_not_called()


def test_disconnect():
    emitter = Emitter()
    mock = MagicMock()
    # disconnecting an unconnected slot: error with missing_ok=False, no-op otherwise
    with pytest.raises(ValueError) as e:
        emitter.one_int.disconnect(mock, missing_ok=False)
    assert "slot is not connected" in str(e)
    emitter.one_int.disconnect(mock)
    emitter.one_int.connect(mock)
    emitter.one_int.emit(1)
    mock.assert_called_once_with(1)
    mock.reset_mock()
    emitter.one_int.disconnect(mock)
    emitter.one_int.emit(1)
    mock.assert_not_called()


def test_slot_types():
    emitter = Emitter()
    assert len(emitter.one_int._slots) == 0
    emitter.one_int.connect(lambda x: None)
    assert len(emitter.one_int._slots) == 1
    emitter.one_int.connect(f_int)
    assert len(emitter.one_int._slots) == 2
    # connecting same function twice is (currently) OK
    emitter.one_int.connect(f_int)
    assert len(emitter.one_int._slots) == 3
    assert isinstance(emitter.one_int._slots[-1][0], FunctionType)
    # bound methods are stored as (weakref, name, ...) tuples, not strong refs
    obj = MyObj()
    emitter.one_int.connect(obj.f_int)
    assert len(emitter.one_int._slots) == 4
    assert isinstance(emitter.one_int._slots[-1][0], tuple)
    assert isinstance(emitter.one_int._slots[-1][0][0], weakref.ref)
    with pytest.raises(TypeError):
        emitter.one_int.connect("not a callable")  # type: ignore


def test_basic_signal_with_sender_receiver():
    """standard Qt usage, as class attribute"""
    emitter = Emitter()
    receiver = MyReceiver()
    receiver.expect_sender = emitter
    receiver.expect_signal = emitter.one_int
    receiver.expect_name = "one_int"
    assert Signal.current_emitter() is None
    emitter.one_int.connect(receiver.assert_sender)
    emitter.one_int.emit(1)
    # back to none after the call is over.
    assert Signal.current_emitter() is None
    emitter.one_int.disconnect()
    # sanity check... to make sure that methods are in fact being called.
    emitter.one_int.connect(receiver.assert_not_sender)
    with pytest.raises(AssertionError):
        emitter.one_int.emit(1)


def test_basic_signal_with_sender_nonreceiver():
    """standard Qt usage, as class attribute"""
    emitter = Emitter()
    nr = MyObj()
    emitter.one_int.connect(nr.f_no_arg)
    emitter.one_int.connect(nr.f_int)
    emitter.one_int.connect(nr.f_vararg_varkwarg)
    emitter.one_int.emit(1)
    # emitter.one_int.connect(nr.two_int)
def test_signal_instance():
    """make a signal instance without a class"""
    signal = SignalInstance((int,))
    mock = MagicMock()
    signal.connect(mock)
    signal.emit(1)
    mock.assert_called_once_with(1)
    signal = SignalInstance()
    mock = MagicMock()
    signal.connect(mock)
    signal.emit()
    mock.assert_called_once_with()


def test_signal_instance_error():
    """without a class"""
    # a bare Signal (descriptor) cannot be connected directly
    signal = Signal()
    mock = MagicMock()
    with pytest.raises(AttributeError) as e:
        signal.connect(mock)
    assert "Signal() class attribute" in str(e)


@pytest.mark.parametrize(
    "slot",
    [
        "f_no_arg",
        "f_int_decorated_stupid",
        "f_int_decorated_good",
        "f_any_assigned",
        "partial",
    ],
)
def test_weakref(slot):
    """Test that a connected method doesn't hold strong ref."""
    emitter = Emitter()
    obj = MyObj()
    assert len(emitter.one_int) == 0
    emitter.one_int.connect(
        partial(obj.f_int_int, 1) if slot == "partial" else getattr(obj, slot)
    )
    assert len(emitter.one_int) == 1
    emitter.one_int.emit(1)
    assert len(emitter.one_int) == 1
    del obj
    gc.collect()
    emitter.one_int.emit(1)  # this should trigger deletion
    assert len(emitter.one_int) == 0


def test_norm_slot():
    # every spelling of a bound method normalises to the same weakref tuple
    e = Emitter()
    r = MyObj()
    normed1 = e.one_int._normalize_slot(r.f_any)
    normed2 = e.one_int._normalize_slot(normed1)
    normed3 = e.one_int._normalize_slot((r, "f_any", None))
    normed4 = e.one_int._normalize_slot((weakref.ref(r), "f_any", None))
    assert normed1 == (weakref.ref(r), "f_any", None)
    assert normed1 == normed2 == normed3 == normed4
    # plain functions pass through unchanged
    assert e.one_int._normalize_slot(f_any) == f_any
# Every module-level fixture callable defined above (f_no_arg, f_int, ...).
ALL = {n for n, f in locals().items() if callable(f) and n.startswith("f_")}

# For each signal name: fixtures whose argument COUNT makes them unconnectable.
COUNT_INCOMPATIBLE = {
    "no_arg": ALL - {"f_no_arg", "f_vararg", "f_vararg_varkwarg", "f_vararg_kwarg"},
    "one_int": {
        "f_int_int",
        "f_str_any",
        "f_str_int_any",
        "f_str_int_kwarg",
        "f_str_int_vararg",
        "f_str_int",
        "f_str_str",
    },
    "str_int": {"f_str_int_any"},
}

# For each signal name: fixtures whose annotated TYPES make them
# unconnectable when check_types=True.
SIG_INCOMPATIBLE = {
    "no_arg": {"f_int_int", "f_int", "f_str_int_any", "f_str_str"},
    "one_int": {
        "f_int_int",
        "f_str_int_any",
        "f_str_int_vararg",
        "f_str_str",
        "f_str_str",  # NOTE(review): duplicate literal — harmless in a set
        "f_str",
    },
    "str_int": {"f_int_int", "f_int", "f_str_int_any", "f_str_str"},
}
@pytest.mark.parametrize("typed", ["typed", "untyped"])
@pytest.mark.parametrize("func_name", ALL)
@pytest.mark.parametrize("sig_name", ["no_arg", "one_int", "str_int"])
@pytest.mark.parametrize("mode", ["func", "meth", "partial"])
def test_connect_validation(func_name, sig_name, mode, typed):
    # exhaustively cross-checks every fixture against every signal signature
    from functools import partial

    if mode == "meth":
        func = getattr(MyObj(), func_name)
    elif mode == "partial":
        func = partial(globals()[func_name])
    else:
        func = globals()[func_name]
    e = Emitter()
    check_types = typed == "typed"
    signal: SignalInstance = getattr(e, sig_name)
    bad_count = COUNT_INCOMPATIBLE[sig_name]
    bad_sig = SIG_INCOMPATIBLE[sig_name]
    if func_name in bad_count or check_types and func_name in bad_sig:
        with pytest.raises(ValueError) as er:
            signal.connect(func, check_types=check_types)
        assert "Accepted signature:" in str(er)
        return
    signal.connect(func, check_types=check_types)
    # emit default-constructed values matching the signal's own signature
    args = (p.annotation() for p in signal.signature.parameters.values())
    signal.emit(*args)


def test_connect_lambdas():
    e = Emitter()
    assert len(e.two_int._slots) == 0
    e.two_int.connect(lambda: None)
    e.two_int.connect(lambda x: None)
    assert len(e.two_int._slots) == 2
    e.two_int.connect(lambda x, y: None)
    e.two_int.connect(lambda x, y, z=None: None)
    assert len(e.two_int._slots) == 4
    e.two_int.connect(lambda x, y, *z: None)
    e.two_int.connect(lambda *z: None)
    assert len(e.two_int._slots) == 6
    e.two_int.connect(lambda *z, **k: None)
    assert len(e.two_int._slots) == 7
    # three required positionals cannot be fed by a two-arg signal
    with pytest.raises(ValueError):
        e.two_int.connect(lambda x, y, z: None)


def test_mock_connect():
    # MagicMock slots connect without signature errors
    e = Emitter()
    e.one_int.connect(MagicMock())
# fmt: off
# simple inheritance chain for subtype-compatibility checks: A <- B <- C
class TypeA: ...
class TypeB(TypeA): ...
class TypeC(TypeB): ...


class Rcv:
    """Receivers annotated with each type, directly and as forward refs."""
    def methodA(self, obj: TypeA): ...
    def methodA_ref(self, obj: 'TypeA'): ...
    def methodB(self, obj: TypeB): ...
    def methodB_ref(self, obj: 'TypeB'): ...
    def methodOptB(self, obj: Optional[TypeB]): ...
    def methodOptB_ref(self, obj: 'Optional[TypeB]'): ...
    def methodC(self, obj: TypeC): ...
    def methodC_ref(self, obj: 'TypeC'): ...


class Emt:
    signal = Signal(TypeB)
# fmt: on
def test_forward_refs_type_checking():
    # slots typed as TypeB, Optional[TypeB] or the subclass TypeC all accept
    # a TypeB emission — including when the annotation is a forward ref
    e = Emt()
    r = Rcv()
    e.signal.connect(r.methodB, check_types=True)
    e.signal.connect(r.methodB_ref, check_types=True)
    e.signal.connect(r.methodOptB, check_types=True)
    e.signal.connect(r.methodOptB_ref, check_types=True)
    e.signal.connect(r.methodC, check_types=True)
    e.signal.connect(r.methodC_ref, check_types=True)
    # signal is emitting a TypeB, but method is expecting a typeA
    assert not issubclass(TypeA, TypeB)
    # typeA is not a TypeB, so we get an error
    with pytest.raises(ValueError):
        e.signal.connect(r.methodA, check_types=True)
    with pytest.raises(ValueError):
        e.signal.connect(r.methodA_ref, check_types=True)


def test_checking_off():
    e = Emitter()

    # the no_check signal was instantiated with check_[nargs/types] = False
    @e.no_check.connect
    def bad_in_many_ways(x: int, y, z):
        ...


def test_keyword_only_not_allowed():
    e = Emitter()

    def f(a: int, *, b: int):
        ...

    with pytest.raises(ValueError) as er:
        e.two_int.connect(f)
    assert "Required KEYWORD_ONLY parameters not allowed" in str(er)


def test_unique_connections():
    e = Emitter()
    assert len(e.one_int._slots) == 0
    e.one_int.connect(f_no_arg, unique=True)
    assert len(e.one_int._slots) == 1
    # a duplicate unique connection is silently ignored...
    e.one_int.connect(f_no_arg, unique=True)
    assert len(e.one_int._slots) == 1
    # ...unless unique="raise" is requested
    with pytest.raises(ValueError):
        e.one_int.connect(f_no_arg, unique="raise")
    assert len(e.one_int._slots) == 1
    e.one_int.connect(f_no_arg)
    assert len(e.one_int._slots) == 2


def test_asynchronous_emit():
    e = Emitter()
    a = []

    def slow_append(arg: int):
        time.sleep(0.1)
        a.append(arg)

    mock = MagicMock(wraps=slow_append)
    e.no_arg.connect(mock, unique=False)
    assert not Signal.current_emitter()
    value = 42
    thread = e.no_arg.emit(value, asynchronous=True)
    mock.assert_called_once()
    # while the emission thread runs, current_emitter is still set
    assert Signal.current_emitter() is e.no_arg
    # dude, you have to wait.
    assert not a
    if thread:
        thread.join()
    assert a == [value]
    assert not Signal.current_emitter()


def test_sig_unavailable():
    """In some cases, signature.inspect() fails on a callable, (many builtins).

    We should still connect, but with a warning.
    """
    # NOTE(review): pytest.warns(None) is deprecated in pytest>=7 — this file
    # presumably pins an older pytest; confirm before upgrading.
    e = Emitter()
    with pytest.warns(None):
        e.one_int.connect(vars, check_nargs=False)  # no warning
    with pytest.warns(UserWarning):
        e.one_int.connect(vars)
    # we've special cased print... due to frequency of use.
    with pytest.warns(None):
        e.one_int.connect(print)  # no warning
def test_pause():
    """Test that we can pause, and resume emission of (possibly reduced) args."""
    emitter = Emitter()
    mock = MagicMock()
    emitter.one_int.connect(mock)
    emitter.one_int.emit(1)
    mock.assert_called_once_with(1)
    mock.reset_mock()
    # while paused, emissions are queued rather than delivered
    emitter.one_int.pause()
    emitter.one_int.emit(1)
    emitter.one_int.emit(2)
    emitter.one_int.emit(3)
    mock.assert_not_called()
    emitter.one_int.resume()
    mock.assert_has_calls([call(1), call(2), call(3)])
    mock.reset_mock()
    # a reducer folds all queued emissions into a single call argument
    with emitter.one_int.paused(lambda a, b: (a[0].union(set(b)),), (set(),)):
        emitter.one_int.emit(1)
        emitter.one_int.emit(2)
        emitter.one_int.emit(3)
    mock.assert_called_once_with({1, 2, 3})
    mock.reset_mock()
    # pausing then resuming with nothing queued delivers nothing
    emitter.one_int.pause()
    emitter.one_int.resume()
    mock.assert_not_called()
def test_resume_with_initial():
emitter = Emitter()
mock = MagicMock()
emitter.one_int.connect(mock)
with emitter.one_int.paused(lambda a, | |
<filename>tideturb/tideturb.py
import pickle
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import os
import tqdm
mpl.use("Agg")
"""This is a model of a turbidity current influenced by tidal flows in
a submarine canyon. The two-layer shallow water equation system is
employed. The upper layer is an ambient water, and the lower layer
is a turbidity current.
.. codeauthor:: <NAME>
Example
--------
from tideturb import TwoLayerTurbidityCurrent, Grid, load_model
from matplotlib import pyplot as plt
grid = Grid(number_of_grids=500, spacing=10.0)
grid.eta = grid.x * -0.01
tc = TwoLayerTurbidityCurrent(
grid=grid,
turb_vel=2.0,
ambient_vel=0.3,
turb_thick=5.0,
ambient_thick=100.0,
concentration=0.01,
alpha=0.02,
implicit_repeat_num=20,
)
steps = 500
for i in range(steps):
tc.plot()
plt.savefig('test6/tidal_flood_{:04d}'.format(i))
tc.run_one_step(dt=10.0)
print("", end='\r')
print('{:.1f}% finished.'.format(i / steps * 100), end='\r')
tc.plot()
plt.savefig('test6/tidal_flood_{:04d}'.format(i))
plt.show()
tc.save('test6_5000sec')
"""
class Grid:
    """Computation grid for TwoLayerTurbidityCurrent.

    Stores x coordinates, bed elevation, and the flow variables of both
    layers, plus the index arrays of the interior (non-boundary) nodes.
    """

    def __init__(
        self, number_of_grids=100, start=0.0, end=None, spacing=1.0, filename=None
    ):
        """ Constructor of the class Grid

        Parameters
        ----------------
        number_of_grids : int, optional
            Number of grid nodes in the computational domain.
        start : float, optional
            A value of x-coordinate at the landward end of the
            computational domain (m).
        end : float, optional
            A value of x-coordinate at the seaward end of the
            computational domain (m).
        spacing : float, optional
            Spacing of the grid nodes. This is ignored when the
            parameter "end" is specified.
        filename : string, optional
            A name of file for topographic data in CSV format.
            The first column is for x coordinate (distance from
            the upstream end), and the second column is for
            topographic elevation. Unit is meter.
        """
        try:
            # set x coordinate
            self.number_of_grids = number_of_grids
            self.dx = spacing
            if end is None:
                self.x = np.arange(start, start + spacing *
                                   number_of_grids, spacing)
            else:
                self.x = np.arange(start, end, spacing)
            # bed elevation
            self.eta = np.zeros(self.x.shape)
            # Load initial topography from file, or fall back to a
            # constant slope.
            if filename is not None:
                topo_data = np.loadtxt(filename, delimiter=",")
                self.eta = np.interp(self.x, topo_data[:, 0], topo_data[:, 1])
            else:
                # BUG FIX: was ``self.grid.eta = self.grid.x * -0.05`` —
                # Grid has no ``grid`` attribute, so the default (no-file)
                # constructor always raised AttributeError.
                self.eta = self.x * -0.05  # constant slope
        except ValueError as ve:
            # NOTE(review): only ValueError (e.g. a malformed topography
            # file) is swallowed here; the grid may then be left partially
            # initialised — confirm this best-effort behaviour is intended.
            print(ve)
        # flow parameters
        self.U_a = np.zeros(self.x.shape)  # velocity of ambient water
        self.U_t = np.zeros(self.x.shape)  # velocity of a turbidity current
        self.h_a = np.zeros(self.x.shape)  # height of ambient water
        self.h_t = np.zeros(self.x.shape)  # height of a turbidity current
        self.C = np.zeros(self.x.shape)  # sediment concentration
        # indices of core grids (excluding boundary grids)
        self.core_nodes = np.arange(1, len(self.x) - 1, dtype="int")
        self.core_links = np.arange(1, len(self.x) - 2, dtype="int")
class TwoLayerTurbidityCurrent:
"""Two layer model of a turbidity current and an overlying tidal current
"""
def __init__(
    self,
    grid=None,
    ambient_vel=0.0,
    ambient_thick=20,
    turb_vel=1.0,
    turb_thick=10,
    concentration=0.01,
    R=1.65,
    g=9.81,
    Cf=0.004,
    nu_t=1.0 * 10 ** -4,
    nu_a=0.8,
    Ds=50 * 10 ** -6,
    nu=1.010 * 10 ** -6,
    h_init=0.0001,
    C_init=0.0001,
    h_e=0.01,
    alpha=0.01,
    implicit_repeat_num=5,
    entrainment="gp1991field",
):
    """Constructor for TwoLayerTurbidityCurrent.

    Parameters
    -----------
    grid : Grid, optional
        Grid object that is used for calculation. If this parameter
        is not specified, the default values of Grid object are used.
    ambient_vel : float, optional
        Flow velocity of ambient water (supposing tidal current) at
        the upstream end (m/s).
    ambient_thick : float, optional
        Initial thickness of ambient water (m).
    turb_vel : float, optional
        Velocity of a turbidity current at the upstream end (m/s).
    turb_thick : float, optional
        Thickness of a turbidity current at the upstream end (m).
    concentration : float, optional
        Sediment concentration in a turbidity current.
    R : float, optional
        Submerged specific density of sediment particles. 1.65 for
        quartz grains.
    g : float, optional
        Gravity acceleration. Default value is 9.81 (m/s^2).
    Cf : float, optional
        Bed friction coefficient. Default value is 0.004.
    nu : float, optional
        Kinematic viscosity of water. Default value is 1.010*10**-6
    Ds : float, optional
        Sediment particle diamter. Default value is 50 microns.
    nu_t : float, optional
        Eddy viscosity at the interface between two layers
    nu_a : float, optional
        Artificial viscosity parameter
    h_init : float, optional
        Dummy flow thickness of turbidity current. This is needed for
        numerical stability.
    C_init : float, optional
        Dummy sediment concentration, used for numerical stability.
    h_e : float, optional
        Criterion for judging wet and dry grids
    alpha : float, optional
        Coefficient to determine the time step length considering
        Courant Number. Default is 0.01.
    implicit_repeat_num : float, optional
        Number of repetition for calculating implicit scheme. Default
        is 5.
    entrainment : str, optional(default="gp1991field")
        Choice of the sediment entrainment function
        Default is Garcia and Parker (1991) with a limit coefficient
        introduced by Fildani et al. (2006).
        Other choices are:
        "gp1991": Garcia and Parker (1991)
        "vanrijn1984": van Rijn (1984) (not implemented)
        "Leeuw2020" : Leeuw et al. (2020) (not implemented)
    """
    try:
        # set a grid
        if grid is None:
            self.grid = Grid()
        else:
            self.grid = grid
        # store parameters
        self.R = R
        self.g = g
        self.Cf = Cf
        self.nu_t = nu_t
        self.nu_a = nu_a
        self.Ds = Ds
        self.nu = nu
        self.h_init = h_init
        self.C_init = C_init
        self.ambient_vel = ambient_vel
        self.ambient_thick = ambient_thick
        self.turb_vel = turb_vel
        self.turb_thick = turb_thick
        self.concentration = concentration
        self.dx = self.grid.dx
        self.alpha = alpha
        self.implicit_repeat_num = implicit_repeat_num
        self.dt = 0.1  # initial time step; presumably recomputed by calc_time_step — confirm
        self.elapsed_time = 0.0
        self.h_e = h_e
        self.entrainment = entrainment
        # Calculate subordinate parameters
        # (get_ws is defined elsewhere in the class; presumably the
        # sediment settling velocity — confirm)
        self.ws = self.get_ws()
    except Exception as exc:
        # NOTE(review): very broad catch that only prints and then lets
        # initialization continue; a failure here (e.g. a bad Grid) will
        # surface again below when self.grid is used.
        print(type(exc))  # the exception instance
        print(exc.args)  # arguments stored in .args
        print(exc)

    # Set main variables
    # The subscript "node" denotes variables at grids. The subscript
    # "link" denotes variables at half-staggered point between grids.
    # This model employs staggered grids. Flow heights are calculated
    # at "nodes", and flow velocities are calculated at "link".
    # The "node" values and "link" values are mapped each other by
    # averaging.
    # h_node[0,:] is the ambient flow height h_a, and h_node[1, :] is
    # the height of the turbidity current.
    # U_link[0,:] is the ambient flow velocity U_a, and U_link[1, :] is
    # the velocity of the turbidity current.

    # main variables at nodes and links
    self.h_node = np.zeros([2, self.grid.x.shape[0]])
    self.h_link = np.zeros([2, self.grid.x.shape[0] - 1])
    self.U_node = np.zeros([2, self.grid.x.shape[0]])
    self.U_link = np.zeros([2, self.grid.x.shape[0] - 1])
    self.C_node = np.zeros([2, self.grid.x.shape[0]])
    self.C_link = np.zeros([2, self.grid.x.shape[0] - 1])
    # spatial derivatives
    self.dhdx = np.zeros(self.h_node.shape)
    self.dUdx = np.zeros(self.U_link.shape)
    self.dCdx = np.zeros(self.C_node.shape)
    # non advection terms
    self.G_h = np.zeros(self.h_node.shape)
    self.G_U = np.zeros(self.U_link.shape)
    self.G_C = np.zeros(self.C_node.shape)

    # Set core nodes and links. Only these core grids are used for
    # calculation.
    # Other nodes and links are used to describe boundary conditions.
    # The tuples built below are fancy-index pairs (layer index, grid
    # index) so that e.g. self.h_node[self.core_nodes] addresses the
    # core grids of both layers at once.
    core_nodes = np.tile(self.grid.core_nodes, (self.h_node.shape[0], 1))
    core_links = np.tile(self.grid.core_links, (self.U_link.shape[0], 1))
    self.core_nodes = tuple(
        (
            np.array([np.arange(self.h_node.shape[0], dtype="int")]).T
            * np.ones(core_nodes.shape, dtype="int"),
            core_nodes,
        )
    )
    self.core_links = tuple(
        (
            np.array([np.arange(self.U_link.shape[0], dtype="int")]).T
            * np.ones(core_links.shape, dtype="int"),
            core_links,
        )
    )

    # Set initial and boundary conditions
    self.h_node[1, 0] = turb_thick
    # self.h_node[1, 1:] = h_init * np.ones(self.h_node[1, 1:].shape)
    self.h_node[1, 1:] = turb_thick * np.ones(self.h_node[1, 1:].shape)
    self.h_node[0, :] = (
        ambient_thick + turb_thick - self.grid.eta - self.h_node[1, :]
    )  # initial water surface is flat
    self.h_link[0, :] = (self.h_node[0, :-1] + self.h_node[0, 1:]) / 2.0
    # discharge of ambient water is conserved along the channel
    self.U_link[0, :] = ambient_vel * ambient_thick / self.h_link[0, :]
    self.U_link[1, 0] = turb_vel
    self.U_link[1, 1:] = turb_vel * np.ones(self.U_link[1, 1:].shape)
    self.C_node[1, 0] = concentration
    # self.C_node[1, 1:] = np.ones(self.C_node.shape[1] - 1) * self.C_init
    self.C_node[1, 1:] = concentration * np.ones(self.C_node[1, 1:].shape)
    # central difference for interior nodes; one-sided copy at both ends
    self.dhdx[:, 1:-1] = (self.h_node[:, :-2] -
                          self.h_node[:, 2:]) / (2 * self.dx)
    self.dhdx[:, 0] = self.dhdx[:, 1]
    self.dhdx[:, -1] = self.dhdx[:, -2]

    # set topography (link values are node-averaged)
    self.eta_node = self.grid.eta
    self.eta_link = (self.eta_node[0:-1] + self.eta_node[1:]) / 2.0

    # Map node and link values each other
    self.update_values(
        self.h_node, self.h_link, self.U_node, self.U_link, self.C_node, self.C_link
    )

    # variables to store calculation results temporary
    self.h_node_temp = self.h_node.copy()
    self.h_link_temp = self.h_link.copy()
    self.U_node_temp = self.U_node.copy()
    self.U_link_temp = self.U_link.copy()
    self.C_node_temp = self.C_node.copy()
    self.C_link_temp = self.C_link.copy()
    self.dhdx_temp = self.dhdx.copy()
    self.dUdx_temp = self.dUdx.copy()
    self.dCdx_temp = self.dCdx.copy()
    # Store variables in the grid object
    self.copy_temp_to_main_variables()

    # Making figures (left/middle/right axes for plotting results)
    self.fig, (self.axL, self.axM, self.axR) = plt.subplots(
        ncols=3, figsize=(25, 6)
    )
def calc_time_step(self):
"""calculate time step length based on CFL condition with
a safe rate alpha
Return
---------
dt : float
A time step length to be used as dt_local.
"""
advel = np.abs(self.U_link) + np.sqrt(
self.R * self.C_link * self.g * self.h_link
)
dt = | |
When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: V1PersistentVolumeClaimList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_persistent_volume_claim" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v1/persistentvolumeclaims'.replace('{format}', 'json')
method = 'GET'
path_params = {}
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='V1PersistentVolumeClaimList',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_namespaced_persistent_volume(self, **kwargs):
    """
    list or watch objects of kind PersistentVolume

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a `callback` function to be invoked
    when the response is received; the request thread is then returned.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :return: V1PersistentVolumeList
        If the method is called asynchronously,
        returns the request thread.
    """
    # (python_name, wire_name) pairs for all supported query parameters.
    query_param_map = (
        ('pretty', 'pretty'),
        ('label_selector', 'labelSelector'),
        ('field_selector', 'fieldSelector'),
        ('watch', 'watch'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
    )
    all_params = [name for name, _ in query_param_map] + ['callback']
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_persistent_volume" % key
            )
        params[key] = val
    del params['kwargs']

    query_params = {}
    for name, wire_name in query_param_map:
        if name in params:
            query_params[wire_name] = params[name]

    # HTTP headers: keep `Accept` only when a usable media type exists.
    header_params = {}
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # No authentication is configured for this endpoint.
    return self.api_client.call_api(
        '/api/v1/persistentvolumes'.replace('{format}', 'json'), 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params={},
        files={},
        response_type='V1PersistentVolumeList',
        auth_settings=[],
        callback=params.get('callback'))
def create_namespaced_persistent_volume(self, body, **kwargs):
    """
    create a PersistentVolume

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a `callback` function to be invoked
    when the response is received; the request thread is then returned.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param V1PersistentVolume body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1PersistentVolume
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['body', 'pretty', 'callback']
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_persistent_volume" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'body' is set
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `create_namespaced_persistent_volume`")

    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']

    # HTTP headers: keep `Accept` only when a usable media type exists.
    header_params = {}
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # No authentication is configured for this endpoint.
    return self.api_client.call_api(
        '/api/v1/persistentvolumes'.replace('{format}', 'json'), 'POST',
        {},
        query_params,
        header_params,
        body=params['body'],
        post_params={},
        files={},
        response_type='V1PersistentVolume',
        auth_settings=[],
        callback=params.get('callback'))
def deletecollection_namespaced_persistent_volume(self, **kwargs):
    """
    delete collection of PersistentVolume

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a `callback` function to be invoked
    when the response is received; the request thread is then returned.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :return: UnversionedStatus
        If the method is called asynchronously,
        returns the request thread.
    """
    # (python_name, wire_name) pairs for all supported query parameters.
    query_param_map = (
        ('pretty', 'pretty'),
        ('label_selector', 'labelSelector'),
        ('field_selector', 'fieldSelector'),
        ('watch', 'watch'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
    )
    all_params = [name for name, _ in query_param_map] + ['callback']
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method deletecollection_namespaced_persistent_volume" % key
            )
        params[key] = val
    del params['kwargs']

    query_params = {}
    for name, wire_name in query_param_map:
        if name in params:
            query_params[wire_name] = params[name]

    # HTTP headers: keep `Accept` only when a usable media type exists.
    header_params = {}
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # No authentication is configured for this endpoint.
    return self.api_client.call_api(
        '/api/v1/persistentvolumes'.replace('{format}', 'json'), 'DELETE',
        {},
        query_params,
        header_params,
        body=None,
        post_params={},
        files={},
        response_type='UnversionedStatus',
        auth_settings=[],
        callback=params.get('callback'))
def read_namespaced_persistent_volume(self, name, **kwargs):
"""
read the specified PersistentVolume
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.read_namespaced_persistent_volume(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the PersistentVolume (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
:return: V1PersistentVolume
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'export', 'exact']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_persistent_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_persistent_volume`")
resource_path = '/api/v1/persistentvolumes/{name}'.replace('{format}', 'json')
| |
<filename>NNSubsampling/NNSubsampling.py
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 14:36:48 2017
@author: <NAME>
"""
from __future__ import print_function
import numpy as np
from sklearn import preprocessing
import time
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import random
try: import cPickle as pickle
except: import pickle
from math import ceil
from math import sqrt
try: from pyflann import *
except: pass
try: from pykdtree.kdtree import KDTree
except: pass
try: from sklearn.neighbors import NearestNeighbors
except: pass
try: from annoy import AnnoyIndex
except: pass
try: from scipy.spatial import cKDTree
except: pass
try: import nmslib
except: pass
'''
Helper functions
'''
def get_data_process(li, list_of_index):
    """Project each entry of *li* onto the features named by *list_of_index*.

    Returns a new list of lists; the input is not modified.
    """
    return [[entry[idx] for idx in list_of_index] for entry in li]
def get_array_based_on_index(li, list_of_index):
    """Select the entries of *li* at positions *list_of_index*.

    Returns the selection as a numpy array, preserving the order of
    *list_of_index*.
    """
    selected = [li[idx] for idx in list_of_index]
    return np.asarray(selected)
def remove_list_from_list(a, b):
    """Return the entries of list *a* that are not in list *b*.

    BUG FIX: the previous implementation returned ``list(set(a) - set(b))``,
    whose iteration order is arbitrary, which made the final order of the
    subsampled indices (and thus the subsampled dataset) non-deterministic.
    This version preserves the original order of *a* while keeping the old
    set semantics (duplicates in *a* are dropped), and membership tests stay
    O(1) via a set.
    """
    exclude = set(b)
    seen = set()
    result = []
    for item in a:
        if item not in exclude and item not in seen:
            seen.add(item)
            result.append(item)
    return result
def chunker(seq, size):
    """Lazily yield consecutive slices of *seq* of length *size*.

    The final slice may be shorter when ``len(seq)`` is not a multiple of
    *size*. Works on any sliceable sequence (lists, strings, ...).
    """
    return (seq[offset:offset + size] for offset in range(0, len(seq), size))
'''
subsample based on kD-tree
'''
def get_subsampling_index2(data_process, standard_scale=True, cutoff_sig=0.02, rate=0.3,
                           method="pykdtree", verbose=1):
    """
    Using Nearest-Neighbor search based algorithm, find the list of indices of the subsampled dataset

    Parameters
    -------------
    data_process: List. the list of datapoints, with selected features
    standard_scale [True]: Boolean. Whether to apply standard scaler to the dataset prior to subsampling
    cutoff_sig [0.02]: Float. cutoff significance. the cutoff distance equals to cutoff_sig
                       times the Euclidean norm of the standard deviations in all dimensions
                       of the data points
    rate [0.3]: Float. possibility of deletion
    method ["pykdtree"]: String. which backend nearest neighbour model to use.
                         possible choices: ["pykdtree", "nmslib", "sklearn", "scipy", "annoy", "flann"]
    verbose [1]: integer. level of verbosity

    Raises
    -------------
    NotImplementedError: when `method` is not one of the supported backends.

    Return
    -------------
    overall_keep_list: The list of indices of the final subsampled entries
    """
    if verbose >= 1:
        print("Started NN-subsampling, original length: {}".format(len(data_process)))
    method = method.lower()
    start = time.time()
    # Validate the requested backend up front, before any expensive work.
    if method == "flann":
        if verbose >= 1:
            print("use flann backend")
    elif method == "pykdtree":
        if verbose >= 1:
            print("use pykdtree backend")
    elif method == "sklearn":
        if verbose >= 1:
            print("use sklearn nearest neighbors backend")
    elif method == "scipy":
        if verbose >= 1:
            print("use scipy cKDTree backend")
    elif method == "annoy":
        if verbose >= 1:
            print("use annoy backend")
    elif method == "nmslib":
        if verbose >= 1:
            print("use nmslib backend")
    else:
        print("method {} not implemented".format(method))
        # BUG FIX: was `raise NotImplemented`. NotImplemented is a sentinel
        # value (for binary-operator dispatch), not an exception class;
        # raising it produces a confusing TypeError in Python 3.
        raise NotImplementedError("method {} not implemented".format(method))

    # apply standard scaling
    if standard_scale:
        if verbose >= 2:
            print("Subample with standard scaled data")
        data_process = StandardScaler().fit_transform(np.asarray(data_process).copy())
    else:
        if verbose >= 2:
            print("Subample with original data")
        data_process = np.asarray(data_process).copy()

    # set cutoff distance: cutoff_sig times the Euclidean norm of the
    # per-feature standard deviations
    list_of_descs = zip(*data_process)
    sum_std2 = 0.
    for descs in list_of_descs:
        temp_std = np.std(descs)
        sum_std2 += temp_std**2
    cutoff = cutoff_sig * np.sqrt(sum_std2)

    # initialize the index list with every data point kept
    overall_keep_list = np.arange(len(data_process)).tolist()
    keep_going = True
    iter_count = 1
    while keep_going:
        if verbose >= 2:
            print('start iteration {}, total length: {}'.format(iter_count, len(overall_keep_list)))
        start_cycle = time.time()
        temp_data_process = get_array_based_on_index(data_process.copy(), overall_keep_list)

        # build and query nearest neighbour model (k=2: each point's first
        # neighbour is itself, the second is its true nearest neighbour)
        if method == "flann":
            flann = FLANN()
            indices, distances = flann.nn(temp_data_process, temp_data_process, 2, algorithm="kmeans")
        elif method == "scipy":
            kd_tree = cKDTree(temp_data_process)
            distances, indices = kd_tree.query(temp_data_process, k=2)
        elif method == "pykdtree":
            kd_tree = KDTree(temp_data_process, leafsize=6)
            distances, indices = kd_tree.query(temp_data_process, k=2)
        elif method == "sklearn":
            nbrs = NearestNeighbors(n_neighbors=2, algorithm='kd_tree', n_jobs=-1).fit(temp_data_process)
            distances, indices = nbrs.kneighbors(temp_data_process)
        elif method == "annoy":
            annoy = AnnoyIndex(len(temp_data_process[0]), metric='euclidean')
            for i in range(len(temp_data_process)):
                annoy.add_item(i, temp_data_process[i])
            annoy.build(1)
            distances = []
            indices = []
            for i in range(len(temp_data_process)):
                temp_index, temp_dist = annoy.get_nns_by_vector(temp_data_process[i], 2, include_distances=True)
                indices.append([i, temp_index[1]])
                distances.append([0.0, temp_dist[1]])
        elif method == "nmslib":
            index = nmslib.init(method='hnsw', space='l2')
            index.addDataPointBatch(temp_data_process)
            index.createIndex(print_progress=False)
            neighbours = index.knnQueryBatch(temp_data_process, k=2)
            distances = []
            indices = []
            for item in neighbours:
                indices.append(item[0])
                distances.append(item[1])
        else:
            # Unreachable: `method` was validated above. Kept as a guard.
            # BUG FIX: was `raise NotImplemented` (see note above).
            raise NotImplementedError("method {} not implemented".format(method))

        # if distance between each point and its nearest neighbor is below cutoff distance,
        # add the nearest neighbour to the candidate removal list
        remove_index_li = []
        index_li = []
        for index, distance in zip(indices, distances):
            index_li.append(index[0])
            if distance[1] <= cutoff:
                remove_index_li.append(index[1])

        # randomly select datapoints in the candidate removal list (based on rate)
        # and form the final removal list of this iteration
        # stop the cycle if the final removal list is empty
        temp_num = int(ceil(float(len(remove_index_li)) * rate))
        if temp_num == 0:
            keep_going = False
        remove_index_li = random_subsampling(remove_index_li, temp_num)
        temp_keep_list = remove_list_from_list(index_li, remove_index_li)
        overall_keep_list = [overall_keep_list[i] for i in temp_keep_list]
        if verbose >= 2:
            print('end iteration {}. length: {}\t time:{}'.format(iter_count, len(overall_keep_list), time.time() - start_cycle))
        iter_count += 1
    if verbose >= 1:
        print('end NN-subsampling. length: {}\t time:{}'.format(len(overall_keep_list), time.time() - start))
    return overall_keep_list
def subsampling(data, list_desc=[], standard_scale=True, cutoff_sig=0.05, rate=0.3,
                method="pykdtree", verbose=1):
    '''
    Run the NN-based subsampling algorithm on a list of data points and
    return the resulting list of subsampled data points.

    Parameters
    -------------
    data: List. the original list of data points
    list_desc [[] (empty list)]: List.
        the indices of descriptors (features) of the datapoints.
        The algorithm subsamples based only on these descriptors
        (other features are still kept in the resulting subsampled dataset).
        If the list is empty, all features are taken into account.
    standard_scale [True]: Boolean. Whether to apply standard scaler to the dataset prior to subsampling
    cutoff_sig [0.05]: Float. cutoff significance; see get_subsampling_index2
    rate [0.3]: Float. possibility of deletion
    method ["pykdtree"]: String. which backend nearest neighbour model to use.
        possible choices: ["pykdtree", "nmslib", "sklearn", "scipy", "annoy", "flann"]
    verbose [1]: integer. level of verbosity

    Return
    -------------
    sampling_result : the result list of subsampled data points
    '''
    # Project onto the requested descriptors, if any were given.
    data_process = data if len(list_desc) == 0 else get_data_process(data, list_desc)
    keep_indices = get_subsampling_index2(
        data_process, standard_scale=standard_scale, cutoff_sig=cutoff_sig,
        rate=rate, method=method, verbose=verbose)
    # The kept indices address the ORIGINAL entries, features and all.
    return [data[idx] for idx in keep_indices]
def subsampling_with_PCA(data, list_desc = [], standard_scale = True, cutoff_sig = 0.05, rate = 0.3, \
start_trial_component = 10, max_component = 30, target_variance = 0.999999, \
method = "pykdtree", verbose = 1):
'''
Run the NN-based subsampling algorithm to a list of data points and
return the resulting list of subsampled data points
The data set will first be transformed by PCA, before running the subsampling algorithm
The number of PCs kept is the minimal number of PCs that have sum explained variance
greater than target_variance
Note that the final resulting list of datapoints (sampling_result) is NOT transformed
(since we only used the PCA + subsampling alghorithm to find the indices of the datapoints to be kept)
Parameters
-------------
data: List. the original list of data points
list_desc [[] (empty list)]: List.
the indices of descriptors (features) of the datapoints.
The algorithm would subsample based only on these descriptors
(although other features will still be kept in the resulting subsampled dataset)
If the list is empty, then all feature will be taken into account
standard_scale [True]: Boolean. Whether to apply standard scaler to the dataset prior to subsampling
cutoff_sig [0.02]: Float. cutoff significance. the cutoff distance equals to the Euclidean
norm of the standard deviations in all dimensions of the data points
rate [0.3]: Float. possibility of deletion
start_trial_component [10]: Int. minimum number of PCs.
if the number of features is below this number, then all features will be kept
max_component [30]: Int.the maximum number of PCs to be kept,
even the target variance has not been reached
target_variance [0.999999]: Float. the target sum of variance.
method ["pykdtree"]: String. which backend nearest neighbour | |
<reponame>SACGF/variantgrid<gh_stars>1-10
"""
Imports work by:
import_patient_records:
- Read the CSV with pandas, and create PatientRecord entries
- Set whether a record is valid or needs manual intervention
- Display what's going to happen with the records (ie good, bad etc)
- Then, click SUBMIT button after review
process_patient_records:
- Do the actual conversion from PatientRecord into the various samples etc etc.
"""
import logging
import pandas as pd
from dateutil import parser
from django.utils import timezone
from guardian.shortcuts import get_objects_for_user
from annotation.phenotype_matching import bulk_patient_phenotype_matching
from library.guardian_utils import assign_permission_to_user_and_groups
from library.pandas_utils import df_nan_to_none
from patients.models import PatientColumns, PatientRecord, Specimen, Patient, \
PatientModification, PatientRecordOriginType
from patients.models_enums import Sex, NucleicAcid, Mutation
from snpdb.models import Sample
UNKNOWN_STRING = 'UNKNOWN' # Upper
def assign_patient_to_sample(patient_import, user, sample, patient, description, origin):
    """Attach *patient* to *sample*, recording a PatientModification.

    A no-op when the sample already has that patient. When the sample
    previously pointed at a different patient, the old one is mentioned in
    the modification description.
    """
    if sample.patient == patient:
        return

    previous_patient = sample.patient
    sample.patient = patient
    sample.save()

    if previous_patient:
        description += f" (previously patient was: {previous_patient})"
    PatientModification.objects.create(
        patient=patient,
        user=user,
        description=description,
        origin=origin,
        patient_import=patient_import,
    )
def assign_specimen_to_sample(patient_import, user, sample, specimen, description, origin):
    """Attach *specimen* to *sample*, recording a PatientModification.

    A no-op when the sample already has that specimen. The modification is
    logged against the specimen's patient; a previously-assigned specimen is
    mentioned in the description.
    """
    if sample.specimen == specimen:
        return

    previous_specimen = sample.specimen
    sample.specimen = specimen
    sample.save()

    if previous_specimen:
        description += f" (previously specimen was: {previous_specimen})"
    PatientModification.objects.create(
        patient=specimen.patient,
        user=user,
        description=description,
        origin=origin,
        patient_import=patient_import,
    )
def parse_date(row, column, validation_messages):
    """Parse ``row[column]`` into a datetime via dateutil.

    Returns None when the cell is empty/NaN or equals "UNKNOWN"
    (case-insensitively). On a parse failure a message is appended to
    *validation_messages* and None is returned.
    """
    date_string = row[column]
    d = None
    if date_string and not pd.isnull(date_string):
        if date_string.upper() != UNKNOWN_STRING:
            try:
                d = parser.parse(date_string)
            except (ValueError, OverflowError, TypeError):
                # BUG FIX: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt. dateutil documents
                # ValueError/OverflowError for unparseable input;
                # TypeError guards a non-string cell.
                message = f"{column}: Could not parse date '{date_string}'"
                validation_messages.append(message)
    return d
def parse_boolean(row, column, validation_messages, nullable=True):
    """Interpret ``row[column]`` as a boolean.

    Accepts "true"/"y" and "false"/"n" (any case). Falsy cells are returned
    unchanged (with a validation message when *nullable* is False);
    unrecognised strings are also returned unchanged, with a message.
    """
    value = row[column]
    if not value:
        # Empty/None cell: only a problem for non-nullable fields.
        if not nullable:
            validation_messages.append(
                f"{column}: Non-nullable boolean field was None")
        return value

    normalized = value.lower()
    if normalized in ('true', 'y'):
        return True
    if normalized in ('false', 'n'):
        return False

    validation_messages.append(
        f"{column}: couldn't interpret boolean from '{value}'")
    return value
def parse_choice(choices, row, column, validation_messages):
    """Resolve ``row[column]`` against Django-style *choices*, case-insensitively.

    The cell may hold either a stored key or a display label; the stored key
    is returned. A None cell returns None silently; an unmatched value
    returns None and appends a validation message listing the valid options.
    """
    raw = row[column]
    if raw is None:
        return None
    raw = raw.upper()

    by_key = dict(choices)
    if raw in by_key:
        return raw

    # Fall back to matching the (upper-cased) display labels.
    by_label = {label.upper(): key for key, label in choices}
    value = by_label.get(raw)
    if value is None:
        valid = ','.join(list(by_key.keys()) + list(by_label.keys()))
        message = f"{column}: Could not parse choice '{raw}' (valid: {valid})"
        validation_messages.append(message)
    return value
def match_sample(user, sample_id, sample_name, validation_messages):
    """Find a single Sample the user may edit, by id (preferred) or name.

    Returns the matched Sample, or None when neither identifier was given
    or the lookup failed; failures are logged and appended to
    *validation_messages*.
    """
    sample = None
    if sample_id or sample_name:
        msg = None
        # Only samples the user has write ('change_sample') permission on.
        samples_qs = get_objects_for_user(user, 'snpdb.change_sample')
        # sample_id wins when both identifiers are supplied.
        # NOTE: `kwargs` is also reused in the final validation message.
        if sample_id:
            kwargs = {"id": sample_id}
        elif sample_name:
            kwargs = {"name": sample_name}
        try:
            sample = samples_qs.get(**kwargs)
        except Sample.MultipleObjectsReturned:
            msg = "Matched multiple records!"
        except Sample.DoesNotExist:
            msg = f"Couldn't load sample '{sample_name}'"
            # Distinguish "doesn't exist at all" from "exists but the user
            # lacks write access" by querying without the permission filter.
            if sample_id:
                sample_without_permission = Sample.objects.filter(pk=sample_id)
            else:
                sample_without_permission = Sample.objects.filter(name=sample_name)
            if sample_without_permission.exists():
                msg += ". Samples exists with that name, but you don't have write access."
        if msg:
            logging.warning(msg)
            validation_messages.append(f"Match Sample({kwargs}): {msg}")
    return sample
def create_patient(patient_import, first_name, last_name, sex, date_of_birth, user):
    """Create a new Patient (names upper-cased), grant the user/groups
    permission on it, and record an import PatientModification entry."""
    # Normalise names to upper case when present.
    first_name = first_name.upper() if first_name else first_name
    last_name = last_name.upper() if last_name else last_name
    patient = Patient.objects.create(
        last_name=last_name,
        first_name=first_name,
        date_of_birth=date_of_birth,
        sex=sex or Sex.UNKNOWN,
    )
    assign_permission_to_user_and_groups(user, patient)
    # Audit trail entry for the CSV import.
    PatientModification.objects.create(
        patient=patient,
        user=user,
        description="Imported record",
        origin=PatientRecordOriginType.UPLOADED_CSV,
        patient_import=patient_import,
    )
    return patient
def set_fields_if_blank(obj, field_values):
    """Fill each attribute of ``obj`` from ``field_values`` only when the
    attribute's current value is blank (falsy).

    Args:
        obj: object whose attributes may be filled in.
        field_values: mapping of attribute name -> candidate value.

    Returns:
        True if any attribute was set, False otherwise.
    """
    changed = False
    for field, new_value in field_values.items():
        # Fix: the original condition was inverted — it set fields whose
        # existing value was truthy, the opposite of "set if blank".
        if not getattr(obj, field):
            setattr(obj, field, new_value)
            changed = True
    return changed
def process_record(patient_records, record_id, row):
    """Process one CSV row of a patient-records import.

    Parses and validates the row, matches or creates the Patient, updates
    deceased/date-of-death state, patient fields, phenotype text, specimen
    and sample links, then writes a PatientRecord audit row.

    Returns the Patient if its phenotype text changed (so the caller can run
    phenotype matching in bulk at the end), otherwise None.
    """
    patient_to_check_for_phenotype_match = None
    user = patient_records.user
    logging.info("row:")
    logging.info(row)
    validation_messages = []
    # ---- Parse raw row values; parse_* helpers record problems in
    # ---- validation_messages rather than raising.
    family_code = row[PatientColumns.PATIENT_FAMILY_CODE]
    first_name = row[PatientColumns.PATIENT_FIRST_NAME]
    last_name = row[PatientColumns.PATIENT_LAST_NAME]
    date_of_birth = parse_date(row, PatientColumns.DATE_OF_BIRTH, validation_messages)
    date_of_death = parse_date(row, PatientColumns.DATE_OF_DEATH, validation_messages)
    sex = parse_choice(Sex.choices, row, PatientColumns.SEX, validation_messages)
    affected = parse_boolean(row, PatientColumns.AFFECTED, validation_messages)
    consanguineous = parse_boolean(row, PatientColumns.CONSANGUINEOUS, validation_messages)
    patient_phenotype = row[PatientColumns.PATIENT_PHENOTYPE]
    deceased = row[PatientColumns.DECEASED]
    # only set patient_deceased if deceased flag set but date_of_death not set
    if (deceased == 'Y' and date_of_death is None):
        patient_deceased = True
    elif (deceased == 'N' and date_of_death is None):
        patient_deceased = False
    else:
        patient_deceased = None
    specimen_reference_id = row[PatientColumns.SPECIMEN_REFERENCE_ID]
    specimen_description = row[PatientColumns.SPECIMEN_DESCRIPTION]
    specimen_collected_by = row[PatientColumns.SPECIMEN_COLLECTED_BY]
    specimen_collection_date = parse_date(row, PatientColumns.SPECIMEN_COLLECTION_DATE, validation_messages)
    specimen_received_date = parse_date(row, PatientColumns.SPECIMEN_RECEIVED_DATE, validation_messages)
    specimen_mutation_type = parse_choice(Mutation.choices, row, PatientColumns.SPECIMEN_MUTATION_TYPE, validation_messages)
    specimen_nucleic_acid_source = parse_choice(NucleicAcid.choices, row, PatientColumns.SPECIMEN_NUCLEIC_ACID_SOURCE, validation_messages)
    specimen_age_at_collection = row[PatientColumns.SPECIMEN_AGE_AT_COLLECTION_DATE]
    sample_id = row[PatientColumns.SAMPLE_ID] or None
    sample_name = row[PatientColumns.SAMPLE_NAME]
    # ---- Sample matching (permission-aware; records its own messages).
    matched_sample = match_sample(user, sample_id, sample_name, validation_messages)
    if matched_sample:
        matched_sample_id = int(matched_sample.pk)
    else:
        matched_sample_id = None
    # ---- Match an existing patient, or create a new one.
    matched_patient = Patient.match(first_name, last_name, sex, date_of_birth, user=user)
    if matched_patient:
        created_patient = None
    else:
        created_patient = create_patient(patient_records.patient_import, first_name, last_name, sex, date_of_birth, user)
    patient = matched_patient or created_patient
    # update date of death if not already set
    # set _deceased back to False if DOD is being set
    # print('Deceased = %s AND Date of Death = %s' % (patient_deceased, date_of_death))
    description = None
    if date_of_death is not None:
        # An explicit date of death wins over the Y/N deceased flag.
        if patient.date_of_death != date_of_death:
            patient._deceased = None
            patient.date_of_death = date_of_death
            patient.save()
            description = "Updated patient date of death"
    else:
        # only set deceased if no date of death
        # update patient_deceased if not already set
        if patient_deceased:
            if patient._deceased != patient_deceased:
                patient.date_of_death = None
                patient._deceased = patient_deceased
                patient.save()
                description = "Updated patient as deceased = True"
        # NOTE(review): `not patient_deceased` is also True when
        # patient_deceased is None, so the `else` clause below is
        # unreachable and a None flag falls into this branch — confirm
        # whether `patient_deceased is False` was intended here.
        elif not patient_deceased:
            patient.date_of_death = None
            patient._deceased = patient_deceased
            patient.save()
            description = "Updated patient as deceased = False"
        else:
            # only clear date_of_death and _deceased if they are not currently NULL
            if patient.date_of_death is not None or patient._deceased is not None:
                patient.date_of_death = None
                patient._deceased = None
                patient.save()
                description = "Updated patient deceased and date_of_death to NULL values"
    # if patient has been modified, create a PatientModification record
    if description is not None:
        PatientModification.objects.create(patient=patient,
                                           user=user,
                                           description=description,
                                           origin=PatientRecordOriginType.UPLOADED_CSV,
                                           patient_import=patient_records.patient_import)
    # Fields that can change (ie not used to match)
    PATIENT_FIELDS = {"family_code": family_code,
                      "affected": affected,
                      "consanguineous": consanguineous}
    patient_modified = False
    # Each non-blank CSV value overwrites the patient field and is audited.
    for patient_field, field_value in PATIENT_FIELDS.items():
        if field_value:
            setattr(patient, patient_field, field_value)
            description = f"Set {patient_field} to {field_value}"
            PatientModification.objects.create(patient=patient,
                                               user=user,
                                               description=description,
                                               origin=PatientRecordOriginType.UPLOADED_CSV,
                                               patient_import=patient_records.patient_import)
            patient_modified = True
    # Phenotype text is appended (with a timestamped separator) rather than
    # overwritten, and only when the new text isn't already present.
    PHENOTYPE_FIELDS = {"phenotype": patient_phenotype}
    for phenotype_field, phenotype_value in PHENOTYPE_FIELDS.items():
        if phenotype_value:
            updated_phenotype = None
            existing_value = getattr(patient, phenotype_field)
            if existing_value:
                if phenotype_value not in existing_value:  # not already in there
                    # TODO: be more sophisticated?
                    phenotype_lines = [existing_value,
                                       "-- From Imported CSV on %s:" % timezone.now(),
                                       phenotype_value]
                    updated_phenotype = "\n".join(phenotype_lines)
            else:
                updated_phenotype = phenotype_value
            if updated_phenotype:
                patient_to_check_for_phenotype_match = patient
                setattr(patient, phenotype_field, updated_phenotype)
                patient_modified = True
    if patient_modified:
        patient.save(check_patient_text_phenotype=False)  # Will do bulk at the end
    # ---- Specimen handling: match by reference_id or create one.
    specimen = None
    matched_specimen = None
    created_specimen = None
    # print("specimen_reference_id=%s" %(specimen_reference_id))
    if specimen_reference_id:
        # print("process specimen id=%s" %(specimen_reference_id))
        try:
            specimen = Specimen.objects.get(reference_id=specimen_reference_id)
            if specimen.patient != patient:
                # NOTE(review): {patient} and {specimen.patient} look swapped
                # in this message ("had patient X, tried to assign to Y") —
                # verify the intended wording.
                msg = f"{specimen} had patient {patient}, tried to assign to patient {specimen.patient}"
                raise ValueError(msg)
            matched_specimen = specimen
        except Specimen.DoesNotExist:
            specimen = Specimen.objects.create(reference_id=specimen_reference_id,
                                               patient=patient)
            created_specimen = specimen
        field_values = {"reference_id": specimen_reference_id,
                        "description": specimen_description,
                        "collected_by": specimen_collected_by,
                        "patient": patient,
                        #tissue=tissue,
                        "collection_date": specimen_collection_date,
                        "received_date": specimen_received_date,
                        "mutation_type": specimen_mutation_type,
                        "nucleic_acid_source": specimen_nucleic_acid_source,
                        "_age_at_collection_date": specimen_age_at_collection}
        # set_fields_if_blank's return value merely gates the full overwrite
        # below; every specimen field is then re-assigned from the row.
        changed = set_fields_if_blank(specimen, field_values)
        if changed:
            # print("save specimen id=%s" %(specimen_reference_id))
            specimen.description = specimen_description
            specimen.collected_by = specimen_collected_by
            specimen.patient = patient
            specimen.collection_date = specimen_collection_date
            specimen.received_date = specimen_received_date
            specimen.mutation_type = specimen_mutation_type
            specimen.nucleic_acid_source = specimen_nucleic_acid_source
            # NOTE(review): assigns `age_at_collection` while the dict above
            # used the `_age_at_collection_date` attribute — presumably a
            # model property; confirm both names are correct.
            specimen.age_at_collection = specimen_age_at_collection
            specimen.save()
    else:
        created_specimen = None
    # ---- Link the matched sample to the specimen/patient.
    if matched_sample:
        if specimen:
            matched_sample.specimen = specimen
        description = "Set during patient records import"
        assign_patient_to_sample(patient_records.patient_import, user, matched_sample, patient, description, origin=PatientRecordOriginType.UPLOADED_CSV)
    validation_message = '\n'.join(validation_messages)
    # NOTE(review): bare print() — consider logging instead.
    print(validation_message)
    # ---- Audit row capturing everything parsed/matched/created.
    PatientRecord.objects.create(patient_records=patient_records,
                                 record_id=record_id,
                                 validation_message=validation_message,
                                 matched_sample_id=matched_sample_id,
                                 matched_patient=matched_patient,
                                 matched_specimen=matched_specimen,
                                 created_patient=created_patient,
                                 created_specimen=created_specimen,
                                 sample_id=sample_id,
                                 sample_name=sample_name,
                                 patient_family_code=family_code,
                                 patient_first_name=first_name,
                                 patient_last_name=last_name,
                                 date_of_birth=date_of_birth,
                                 date_of_death=date_of_death,
                                 sex=sex,
                                 affected=affected,
                                 consanguineous=consanguineous,
                                 _deceased=patient_deceased,
                                 patient_phenotype=patient_phenotype,
                                 specimen_reference_id=specimen_reference_id,
                                 specimen_description=specimen_description,
                                 specimen_collected_by=specimen_collected_by,
                                 specimen_collection_date=specimen_collection_date,
                                 specimen_received_date=specimen_received_date,
                                 specimen_mutation_type=specimen_mutation_type,
                                 specimen_nucleic_acid_source=specimen_nucleic_acid_source,
                                 specimen_age_at_collection_date=specimen_age_at_collection)
    return patient_to_check_for_phenotype_match
def pandas_read_encoded_csv(*args, **kwargs):
    """Try opening as UTF-8, then Windows code page 1252 (Western Excel) if that fails.

    Args/kwargs are passed straight through to ``pd.read_csv``. If the caller
    supplied an explicit ``encoding`` no fallback is attempted. When the
    cp1252 retry also fails, the original UnicodeDecodeError is re-raised.
    """
    try:
        df = pd.read_csv(*args, **kwargs)
    except UnicodeDecodeError as ude:
        if "encoding" in kwargs:  # Already there explicitly - just fail...
            raise
        logging.warning("Reading CSV failed %s, retrying with windows code page", str(args))
        kwargs["encoding"] = 'cp1252'
        try:
            df = pd.read_csv(*args, **kwargs)
        except Exception:
            # Fix: was a bare `except:` (also swallowed SystemExit/KeyboardInterrupt).
            # Surface the original decode error, not the fallback failure.
            raise ude
    return df
def get_patient_record_imports_dataframe(f):
    """Load the patient-records CSV as all-string cells, with NaN converted
    to None and every string value stripped of surrounding whitespace."""
    df = pandas_read_encoded_csv(f, index_col=None, dtype=str)
    df = df_nan_to_none(df)
    # NOTE(review): DataFrame.applymap is deprecated since pandas 2.1 in
    # favour of DataFrame.map — confirm the pandas version pinned here.
    df = df.applymap(lambda x: x.strip() if isinstance(x, str) else x)
    return df
def import_patient_records(patient_records):
uploaded_file = patient_records.uploaded_file
filename = uploaded_file.get_filename()
df = get_patient_record_imports_dataframe(filename)
missing_columns = set(PatientColumns.COLUMNS) - set(df.columns)
if missing_columns:
expected = len(PatientColumns.COLUMNS)
found_columns = set(PatientColumns.COLUMNS) & set(df.columns)
found = len(found_columns)
missing_str = ','.join([f'"{s}"' for s in missing_columns])
msg = f"Invalid Patient Records Import file. Only {found} of {expected} columns supplied, missing: {missing_str}"
raise ValueError(msg)
items_processed = 0
patients_to_check_for_phenotype_matches = []
for i, row in df.iterrows():
| |
# hsr4hci/metrics.py
"""
Methods for computing performance metrics (e.g., SNR, logFPF, ...).
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from typing import Any, Dict, List, Optional, Tuple, Union
from astropy.units import Quantity
import numpy as np
import scipy.stats as stats
from hsr4hci.coordinates import cartesian2polar, polar2cartesian
from hsr4hci.photometry import get_flux, get_fluxes_for_polar_positions
from hsr4hci.positions import (
get_reference_positions,
rotate_reference_positions,
)
# -----------------------------------------------------------------------------
# FUNCTION DEFINITIONS
# -----------------------------------------------------------------------------
def two_sample_t_test(
    planet_samples: Union[List[float], np.ndarray],
    noise_samples: Union[List[float], np.ndarray],
) -> Tuple[float, float, float, float, float]:
    """
    Perform the two-sample t-test underlying the signal-to-noise ratio
    (SNR) of Mawet et al. (2014): "Fundamental limitations of high
    contrast imaging set by small sample statistics", The Astrophysical
    Journal, 792(2), 97. DOI: 10.1088/0004-637X/792/2/97

    Args:
        planet_samples: Flux measurements at the planet position(s);
            usually a single value.
        noise_samples: Flux measurements at the reference (noise)
            positions; at least two are required.

    Returns:
        A 5-tuple ``(signal, noise, snr, fpf, p_value)`` where ``signal``
        and ``noise`` are the numerator and denominator of the SNR,
        ``snr`` is the t-test statistic, ``fpf`` the false positive
        fraction (survival function of a t-distribution at the SNR), and
        ``p_value`` the corresponding CDF value (= 1 - FPF).
    """
    n_planet = len(planet_samples)
    n_noise = len(noise_samples)

    # Guard against degenerate sample sizes.
    if n_planet < 1:
        raise ValueError('planet_samples must have at least 1 entry!')
    if n_noise < 2:
        raise ValueError('noise_samples must have at least 2 entries!')

    # Numerator of the SNR (eq. 8 of Mawet et al. 2014): excess of the mean
    # planet flux over the mean reference flux.
    signal = float(np.mean(planet_samples)) - float(np.mean(noise_samples))

    # Denominator (eq. 8): unbiased standard deviation of the reference
    # fluxes (Bessel's correction) times the small-sample correction factor.
    correction_factor = np.sqrt(1 / n_planet + 1 / n_noise)
    noise = np.std(noise_samples, ddof=1) * correction_factor

    # The SNR is the t-test statistic; it follows a t-distribution whose
    # degrees of freedom depend on the sample counts.
    snr = signal / noise
    dof = n_planet + n_noise - 2

    # FPF = 1 - CDF(snr) (eq. 10), computed via the numerically more
    # accurate survival function; the p-value is its complement.
    fpf = stats.t.sf(snr, df=dof)
    p_value = stats.t.cdf(snr, df=dof)

    return signal, noise, snr, fpf, p_value
def compute_metrics(
frame: np.ndarray,
polar_position: Tuple[Quantity, Quantity],
aperture_radius: Quantity,
planet_mode: str = 'FS',
noise_mode: str = 'P',
search_radius: Optional[Quantity] = Quantity(1, 'pixel'),
exclusion_angle: Optional[Quantity] = None,
n_rotation_steps: int = 100,
) -> Tuple[Dict[str, Dict[str, float]], Dict[str, Dict[str, Any]]]:
"""
Compute evaluation metrics (SNR, FPF, ...) at a given position.
Args:
frame: The frame (usually a signal estimate) on which to compute
the metrics.
polar_position: The position of the (candidate) planet as a
2-tuple `(separation, angle)` using "astronomical" polar
coordinates (i.e., 0 degrees = North = "up", not "right",
as in mathematical polar coordinates).
aperture_radius: If the ``planet_mode`` or ``noise_mode`` is
aperture-based, this parameter controls the size of the
apertures.
Regardless of the mode, this value is required to determine
the number of reference positions; therefore it cannot be
optional. (Usually set this to 1/2 of the FWHM of the PSF.)
planet_mode: The ``mode`` to be used to measure the flux of the
planet, or signal. See :func:`hsr4hci.photometry.get_flux`
for more details.
noise_mode: The ``mode`` to be used to measure the flux at the
reference positions. See :func:`hsr4hci.photometry.get_flux`
for more details.
Note that this should be compatible with the choice for the
``planet_mode``, meaning that if the mode for the planet is
`"FS"`, the mode for the noise should be `"P"`, and if the
planet mode is `"ASS"`, the noise mode should be `"AS"`.
search_radius: If the ``planet_mode`` is search-based (`"ASS"`
or `"FS"`), this parameter controls how big the area is that
should be considered for maximizing the planet flux.
exclusion_angle: This parameter controls how the reference
positions are chosen. It can be used, for example, to
exclude the reference positions immediately to the left and
right of the planet position, because for some algorithms
(e.g., PCA), these are known to contain self-subtraction /
over-subtraction "wings" which do not give an unbiased
estimate of the background. For more details, see
:func:`hsr4hci.positions.get_reference_positions`.
n_rotation_steps: This parameter determines the number of
rotation steps that are applied to the reference positions:
The exact placement of the reference positions is always
somewhat arbitrary, but can have a rather large effect on
the final metrics. By rotating the reference positions, we
can at least get a feeling for the size of the effect. See
:func:`hsr4hci.positions.rotate_reference_positions` for
more details.
If this value is set to 0, no rotations are performed.
Returns:
A 2-tuple, consisting of
1. A (nested) dictionary containing the mean, median, standard
deviation, minimum and maximum of each metric (signal, noise,
snr, fpf, log_fpf, p_value), and
2. A (nested) dictionary containing the position of the planet
before and after a potential optimization, both in polar and
in Cartesian coordinates.
"""
# Define a shortcut for the frame size
frame_size = (frame.shape[0], frame.shape[1])
# Compute initial position in Cartesian coordinates
initial_position_cartesian = polar2cartesian(
separation=polar_position[0],
angle=polar_position[1],
frame_size=frame_size,
)
# Measure the planet flux and get its final (= optimized) position; both
# in Cartesian and in polar coordinates
final_position_cartesian, planet_flux = get_flux(
frame=frame,
position=initial_position_cartesian,
mode=planet_mode,
aperture_radius=aperture_radius,
search_radius=search_radius,
)
final_position_polar = cartesian2polar(
position=final_position_cartesian, frame_size=frame_size
)
# Collect the planet positions before and after a potential optimization,
# both in Cartesian and (astronomical) polar coordinates
positions = {
'final': {
'polar': final_position_polar,
'cartesian': final_position_cartesian,
},
'initial': {
'polar': polar_position,
'cartesian': initial_position_cartesian,
},
}
# Get the reference positions for the final planet position
reference_positions = get_reference_positions(
polar_position=final_position_polar,
aperture_radius=aperture_radius,
exclusion_angle=exclusion_angle,
)
# Check that we have enough reference positions to continue computation
if len(reference_positions) < 2:
raise RuntimeError('Too few reference positions (i.e., < 2)!')
# Create rotated versions of the reference positions so that we can
# estimate how much the final metrics depend on the exact placement of
# the reference positions (which is, to some degree, arbitrary).
rotated_reference_positions = rotate_reference_positions(
reference_positions=reference_positions,
n_steps=n_rotation_steps,
)
# Keep track of the result variables for the t-test(s)
signals = []
noises = []
snrs = []
fpfs = []
log_fpfs = []
p_values = []
# Loop over the different reference positions, measure | |
# Repository: Keneral/asystem
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DEPRECATED
# Do not use flimflam.py in future development.
# Extend / migrate to shill_proxy suite of scripts instead.
import logging, time
import dbus
DEFAULT_CELLULAR_TIMEOUT = 60  # seconds to wait for cellular device/service discovery
def make_dbus_boolean(value):
    """Convert a human-readable string ("ON"/"TRUE", "OFF"/"FALSE", or an
    integer string) into a dbus.Boolean."""
    value = value.upper()
    if value in ["ON", "TRUE"]:
        return dbus.Boolean(1)
    elif value in ["OFF", "FALSE"]:
        return dbus.Boolean(0)
    else:
        # Fall back to numeric truthiness; raises ValueError for other text.
        return dbus.Boolean(int(value))
#
# Convert a DBus value to a printable value; used
# to print properties returned via DBus
#
def convert_dbus_value(value, indent=0):
    """Recursively convert a DBus value into a printable Python value;
    dictionaries and arrays are rendered as multi-line strings indented by
    `indent` spaces."""
    # DEPRECATED
    spacer = ' ' * indent
    if value.__class__ == dbus.Byte:
        return int(value)
    elif value.__class__ == dbus.Boolean:
        return bool(value)
    elif value.__class__ == dbus.Dictionary:
        valstr = "{"
        for key in value:
            valstr += "\n" + spacer + " " + \
                key + ": " + str(convert_dbus_value(value[key], indent + 4))
        valstr += "\n" + spacer + "}"
        return valstr
    elif value.__class__ == dbus.Array:
        valstr = "["
        for val in value:
            valstr += "\n" + spacer + " " + \
                str(convert_dbus_value(val, indent + 4))
        valstr += "\n" + spacer + "]"
        return valstr
    else:
        # Scalar DBus types: plain string rendering.
        return str(value)
class FlimFlam(object):
    """Thin DBus wrapper around the shill/flimflam connection manager.

    DEPRECATED: extend/migrate to the shill_proxy suite of scripts instead.
    """
    # DEPRECATED
    SHILL_DBUS_INTERFACE = "org.chromium.flimflam"
    # DBus error names used to detect objects that vanished mid-enumeration.
    UNKNOWN_METHOD = 'org.freedesktop.DBus.Error.UnknownMethod'
    UNKNOWN_OBJECT = 'org.freedesktop.DBus.Error.UnknownObject'
    # Shill device "Type" property values.
    DEVICE_WIMAX = 'wimax'
    DEVICE_CELLULAR = 'cellular'
    @staticmethod
    def _GetContainerName(kind):
        """Map shill element names to the names of their collections."""
        # For example, Device - > Devices.
        # Just pulling this out so we can use a map if we start
        # caring about "AvailableTechnologies"
        return kind + "s"
    @staticmethod
    def WaitForServiceState(service, expected_states, timeout,
                            ignore_failure=False, property_name="State"):
        """Wait until service enters a state in expected_states or times out.

        Args:
            service: service to watch
            expected_states: list of exit states
            timeout: in seconds
            ignore_failure: should the failure state be ignored?
            property_name: name of service property

        Returns: (state, seconds waited)

        If the state is "failure" and ignore_failure is False we return
        immediately without waiting for the timeout.
        """
        state = None
        start_time = time.time()
        timeout = start_time + timeout  # re-purpose 'timeout' as an absolute deadline
        while time.time() < timeout:
            properties = service.GetProperties(utf8_strings = True)
            state = properties.get(property_name, None)
            if ((state == "failure" and not ignore_failure) or
                state in expected_states):
                break
            time.sleep(.5)  # poll twice per second
        config_time = time.time() - start_time
        # str() to remove DBus boxing
        return (str(state), config_time)
    @staticmethod
    def DisconnectService(service, wait_timeout=15):
        """Disconnect a service and wait (up to wait_timeout seconds) for it
        to reach the 'idle' state; tolerates in-progress/not-connected errors."""
        try:
            service.Disconnect()
        except dbus.exceptions.DBusException, error:
            if error.get_dbus_name() not in [
                FlimFlam.SHILL_DBUS_INTERFACE + ".Error.InProgress",
                FlimFlam.SHILL_DBUS_INTERFACE + ".Error.NotConnected", ]:
                raise error
        return FlimFlam.WaitForServiceState(service, ['idle'], wait_timeout)
    def __init__(self, bus=None):
        """Bind the shill Manager interface on `bus` (default: system bus)."""
        if not bus:
            bus = dbus.SystemBus()
        self.bus = bus
        shill = bus.get_object(FlimFlam.SHILL_DBUS_INTERFACE, "/")
        self.manager = dbus.Interface(
            shill,
            FlimFlam.SHILL_DBUS_INTERFACE + ".Manager")
    def _FindDevice(self, device_type, timeout):
        """ Return the first device object that matches a given device type.

        Wait until the device type is available or until timeout

        Args:
            device_type: string format of the type of device.
            timeout: in seconds

        Returns: Device or None
        """
        timeout = time.time() + timeout  # absolute deadline
        device_obj = None
        while time.time() < timeout:
            device_obj = self.FindElementByPropertySubstring('Device',
                                                             'Type',
                                                             device_type)
            if device_obj:
                break
            time.sleep(1)
        return device_obj
    def FindCellularDevice(self, timeout=DEFAULT_CELLULAR_TIMEOUT):
        """Return the first cellular Device, waiting up to `timeout` seconds."""
        return self._FindDevice(self.DEVICE_CELLULAR, timeout)
    def FindWimaxDevice(self, timeout=30):
        """Return the first WiMAX Device, waiting up to `timeout` seconds."""
        return self._FindDevice(self.DEVICE_WIMAX, timeout)
    def _FindService(self, device_type, timeout):
        """Return the first service object that matches the device type.

        Wait until a service is available or until the timeout.

        Args:
            device_type: string format of the type of device.
            timeout: in seconds

        Returns: service or None
        """
        start_time = time.time()
        timeout = start_time + timeout  # absolute deadline
        service = None
        while time.time() < timeout:
            service = self.FindElementByPropertySubstring('Service',
                                                          'Type', device_type)
            if service:
                break
            time.sleep(.5)
        return service
    def FindCellularService(self, timeout=DEFAULT_CELLULAR_TIMEOUT):
        """Return the first cellular Service, waiting up to `timeout` seconds."""
        return self._FindService(self.DEVICE_CELLULAR, timeout)
    def FindWimaxService(self, timeout=30):
        """Return the first WiMAX Service, waiting up to `timeout` seconds."""
        return self._FindService(self.DEVICE_WIMAX, timeout)
    def GetService(self, params):
        """Ask the Manager for (possibly creating) a service matching
        `params` and return its Service DBus interface."""
        path = self.manager.GetService(params)
        return self.GetObjectInterface("Service", path)
    def ConnectService(self, assoc_timeout=15, config_timeout=15,
                       async=False, service=None, service_type='',
                       retry=False, retries=1, retry_sleep=15,
                       save_creds=False,
                       **kwargs):
        """Connect to a service and wait until connection is up

        Args:
            assoc_timeout, config_timeout: Timeouts in seconds.
            async: return immediately. do not wait for connection.
            service: DBus service
            service_type: If supplied, invoke type-specific code to find service.
            retry: Retry connection after Connect failure.
            retries: Number of retries to allow.
            retry_sleep: Number of seconds to wait before retrying.
            kwargs: Additional args for type-specific code

        Returns:
            (success, dictionary), where dictionary contains stats and diagnostics.
        """
        # NOTE(review): 'async' is a reserved keyword in Python 3.7+; this
        # module is Python 2 only (see the DEPRECATED header).
        output = {}
        connected_states = ["ready", "portal", "online"]
        # Retry connections on failure. Need to call GetService again as some
        # Connect failure states are unrecoverable.
        connect_success = False
        while not connect_success:
            if service_type == "wifi":
                try:
                    # Sanity check to make sure the caller hasn't provided
                    # both a service and a service type. At which point its
                    # unclear what they actually want to do, so err on the
                    # side of caution and except out.
                    if service:
                        raise Exception('supplied service and service type')
                    params = {
                        "Type": service_type,
                        "Mode": kwargs["mode"],
                        "SSID": kwargs["ssid"],
                        "Security": kwargs.get("security", "none"),
                        "SaveCredentials": save_creds }
                    # Supply a passphrase only if it is non-empty.
                    passphrase = kwargs.get("passphrase", "")
                    if passphrase:
                        params["Passphrase"] = passphrase
                    path = self.manager.GetService(params)
                    service = self.GetObjectInterface("Service", path)
                except Exception, e:
                    output["reason"] = "FAIL(GetService): exception %s" % e
                    return (False, output)
            output["service"] = service
            try:
                service.Connect()
                connect_success = True
            except Exception, e:
                if not retry or retries == 0:
                    output["reason"] = "FAIL(Connect): exception %s" % e
                    return (False, output)
                else:
                    logging.info("INFO(Connect): connect failed. Retrying...")
                    retries -= 1
            if not connect_success:
                # FlimFlam can be a little funny sometimes. At least for In
                # Progress errors, even though the service state may be failed,
                # it is actually still trying to connect. As such, while we're
                # waiting for retry, keep checking the service state to see if
                # it actually succeeded in connecting.
                state = FlimFlam.WaitForServiceState(
                    service=service,
                    expected_states=connected_states,
                    timeout=retry_sleep,
                    ignore_failure=True)[0]
                if state in connected_states:
                    return (True, output)
                # While service can be caller provided, it is also set by the
                # GetService call above. If service was not caller provided we
                # need to reset it to None so we don't fail the sanity check
                # above.
                if service_type != '':
                    service = None
        if async:
            return (True, output)
        # Phase 1: wait for association (or a connected state).
        logging.info("Associating...")
        (state, assoc_time) = (
            FlimFlam.WaitForServiceState(service,
                                         ["configuration"] + connected_states,
                                         assoc_timeout))
        output["state"] = state
        if state == "failure":
            output["reason"] = "FAIL(assoc)"
        if assoc_time > assoc_timeout:
            output["reason"] = "TIMEOUT(assoc)"
        output["assoc_time"] = assoc_time
        if "reason" in output:
            return (False, output)
        # Phase 2: wait for IP configuration to complete.
        (state, config_time) = (
            FlimFlam.WaitForServiceState(service,
                                         connected_states, config_timeout))
        output["state"] = state
        if state == "failure":
            output["reason"] = "FAIL(config)"
        if config_time > config_timeout:
            output["reason"] = "TIMEOUT(config)"
        output["config_time"] = config_time
        if "reason" in output:
            return (False, output)
        return (True, output)
    def GetObjectInterface(self, kind, path):
        """Return the shill DBus interface of the given kind (e.g. "Service",
        "Device", "Profile") for the object at `path`."""
        return dbus.Interface(
            self.bus.get_object(FlimFlam.SHILL_DBUS_INTERFACE, path),
            FlimFlam.SHILL_DBUS_INTERFACE + "." + kind)
    def FindElementByNameSubstring(self, kind, substring):
        """Return the first element of `kind` whose object path contains
        `substring`, or None."""
        properties = self.manager.GetProperties(utf8_strings = True)
        for path in properties[FlimFlam._GetContainerName(kind)]:
            if path.find(substring) >= 0:
                return self.GetObjectInterface(kind, path)
        return None
    def FindElementByPropertySubstring(self, kind, prop, substring):
        """Return the first element of `kind` whose property `prop` contains
        `substring`, or None; skips objects that vanish during enumeration."""
        properties = self.manager.GetProperties(utf8_strings = True)
        for path in properties[FlimFlam._GetContainerName(kind)]:
            obj = self.GetObjectInterface(kind, path)
            try:
                obj_properties = obj.GetProperties(utf8_strings = True)
            except dbus.exceptions.DBusException, error:
                if (error.get_dbus_name() == self.UNKNOWN_METHOD or
                    error.get_dbus_name() == self.UNKNOWN_OBJECT):
                    # object disappeared; ignore and keep looking
                    continue
                else:
                    raise error
            if (prop in obj_properties and
                obj_properties[prop].find(substring) >= 0):
                return obj
        return None
    def GetObjectList(self, kind, properties=None):
        """Return the DBus interfaces of all elements of `kind`; `properties`
        may be a pre-fetched Manager property dict to avoid a round trip."""
        if properties is None:
            properties = self.manager.GetProperties(utf8_strings = True)
        return [self.GetObjectInterface(kind, path)
                for path in properties[FlimFlam._GetContainerName(kind)]]
    def GetActiveProfile(self):
        """Return the DBus interface of the currently active Profile."""
        properties = self.manager.GetProperties(utf8_strings = True)
        return self.GetObjectInterface("Profile", properties["ActiveProfile"])
    def CreateProfile(self, ident):
        """Create a profile named `ident` and return its DBus interface."""
        path = self.manager.CreateProfile(ident)
        return self.GetObjectInterface("Profile", path)
    def RemoveProfile(self, ident):
        """Remove the profile named `ident`."""
        self.manager.RemoveProfile(ident)
    def PushProfile(self, ident):
        """Push the profile named `ident` onto the profile stack and return
        its DBus interface."""
        path = self.manager.PushProfile(ident)
        return self.GetObjectInterface("Profile", path)
    def PopProfile(self, ident):
        """Pop the profile named `ident` from the profile stack."""
        self.manager.PopProfile(ident)
    def PopAnyProfile(self):
        """Pop the topmost profile from the profile stack, whatever it is."""
        self.manager.PopAnyProfile()
    def GetSystemState(self):
        """Return the Manager's overall connection "State" property."""
        properties = self.manager.GetProperties(utf8_strings = True)
        return properties["State"]
    def GetDebugTags(self):
        """Return the currently enabled shill debug tags."""
        return self.manager.GetDebugTags()
    def ListDebugTags(self):
        """Return the debug tags shill supports."""
        return self.manager.ListDebugTags()
    def SetDebugTags(self, taglist):
        """Enable the given debug tags and raise verbosity; silently ignore
        managers that do not implement the method."""
        try:
            self.manager.SetDebugTags(taglist)
            self.SetDebugLevel(-4)
        except dbus.exceptions.DBusException, error:
            if error.get_dbus_name() not in [
                "org.freedesktop.DBus.Error.UnknownMethod" ]:
                raise error
def SetDebugLevel(self, level):
| |
value):
pass
# ____________________________________________________________
# ____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
# Module-level feature/debug flags.
_debugging = False  # not in use
_new_injections = True
_assertions_quadratic = True  # issue warnings
_assertions_cubic = True
_depreciated = True  # NOTE(review): presumably a misspelling of "deprecated" — confirm before renaming
def cma_default_options_(  # to get keyword completion back
    # the following string arguments are evaluated if they do not contain "filename"
    AdaptSigma='True  # or False or any CMAAdaptSigmaBase class e.g. CMAAdaptSigmaTPA, CMAAdaptSigmaCSA',
    CMA_active='True  # negative update, conducted after the original update',
    # CMA_activefac='1  # learning rate multiplier for active update',
    CMA_cmean='1  # learning rate for the mean value',
    CMA_const_trace='False  # normalize trace, 1, True, "arithm", "geom", "aeig", "geig" are valid',
    CMA_diagonal='0*100*N/popsize**0.5  # nb of iterations with diagonal covariance matrix, True for always',  # TODO 4/ccov_separable?
    CMA_eigenmethod='np.linalg.eigh  # or cma.utilities.math.eig or pygsl.eigen.eigenvectors',
    CMA_elitist='False  #v or "initial" or True, elitism likely impairs global search performance',
    CMA_injections_threshold_keep_len='1  #v keep length if Mahalanobis length is below the given relative threshold',
    CMA_mirrors='popsize < 6  # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
    CMA_mirrormethod='2  # 0=unconditional, 1=selective, 2=selective with delay',
    CMA_mu='None  # parents selection parameter, default is popsize // 2',
    CMA_on='1  # multiplier for all covariance matrix updates',
    # CMA_sample_on_sphere_surface='False  #v replaced with option randn=cma.utilities.math.randhss, all mutation vectors have the same length, currently (with new_sampling) not in effect',
    CMA_sampler='None  # a class or instance that implements the interface of `cma.interfaces.StatisticalModelSamplerWithZeroMeanBaseClass`',
    CMA_sampler_options='{}  # options passed to `CMA_sampler` class init as keyword arguments',
    CMA_rankmu='1.0  # multiplier for rank-mu update learning rate of covariance matrix',
    CMA_rankone='1.0  # multiplier for rank-one update learning rate of covariance matrix',
    CMA_recombination_weights='None  # a list, see class RecombinationWeights, overwrites CMA_mu and popsize options',
    CMA_dampsvec_fac='np.Inf  # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update',
    CMA_dampsvec_fade='0.1  # tentative fading out parameter for sigma vector update',
    CMA_teststds='None  # factors for non-isotropic initial distr. of C, mainly for test purpose, see CMA_stds for production',
    CMA_stds='None  # multipliers for sigma0 in each coordinate, not represented in C, makes scaling_of_variables obsolete',
    # CMA_AII='False  # not yet tested',
    CSA_dampfac='1  #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere',
    CSA_damp_mueff_exponent='0.5  # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
    CSA_disregard_length='False  #v True is untested, also changes respective parameters',
    CSA_clip_length_value='None  #v poorly tested, [0, 0] means const length N**0.5, [-1, 1] allows a variation of +- N/(N+2), etc.',
    CSA_squared='False  #v use squared length for sigma-adaptation ',
    BoundaryHandler='BoundTransform  # or BoundPenalty, unused when ``bounds in (None, [None, None])``',
    bounds='[None, None]  # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector',
    #  , eval_parallel2='not in use {"processes": None, "timeout": 12, "is_feasible": lambda x: True}  # distributes function calls to processes processes'
    # 'callback='None  # function or list of functions called as callback(self) at the end of the iteration (end of tell)', # only necessary in fmin and optimize
    conditioncov_alleviate='[1e8, 1e12]  # when to alleviate the condition in the coordinates and in main axes',
    eval_final_mean='True  # evaluate the final mean, which is a favorite return candidate',
    fixed_variables='None  # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized',
    ftarget='-inf  #v target function value, minimization',
    integer_variables='[]  # index list, invokes basic integer handling: prevent std dev to become too small in the given variables',
    is_feasible='is_feasible  #v a function that computes feasibility, by default lambda x, f: f not in (None, np.NaN)',
    maxfevals='inf  #v maximum number of function evaluations',
    maxiter='100 + 150 * (N+3)**2 // popsize**0.5  #v maximum number of iterations',
    mean_shift_line_samples='False #v sample two new solutions colinear to previous mean shift',
    mindx='0  #v minimal std in any arbitrary direction, cave interference with tol*',
    minstd='0  #v minimal std (scalar or vector) in any coordinate direction, cave interference with tol*',
    maxstd='inf  #v maximal std in any coordinate direction',
    pc_line_samples='False #v one line sample along the evolution path pc',
    popsize='4+int(3*np.log(N))  # population size, AKA lambda, number of new solution per iteration',
    randn='np.random.randn  #v randn(lam, N) must return an np.array of shape (lam, N), see also cma.utilities.math.randhss',
    scaling_of_variables='''None  # deprecated, rather use fitness_transformations.ScaleCoordinates instead (or possibly CMA_stds).
            Scale for each variable in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by
            scaling_of_variables and sigma is unchanged, default is `np.ones(N)`''',
    seed='time  # random number seed for `numpy.random`; `None` and `0` equate to `time`, `np.nan` means "do nothing", see also option "randn"',
    signals_filename='cma_signals.in  # read versatile options from this file (use `None` or `""` for no file) which contains a single options dict, e.g. ``{"timeout": 0}`` to stop, string-values are evaluated, e.g. "np.inf" is valid',
    termination_callback='[]  #v a function or list of functions returning True for termination, called in `stop` with `self` as argument, could be abused for side effects',
    timeout='inf  #v stop if timeout seconds are exceeded, the string "2.5 * 60**2" evaluates to 2 hours and 30 minutes',
    tolconditioncov='1e14  #v stop if the condition of the covariance matrix is above `tolconditioncov`',
    tolfacupx='1e3  #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
    tolupsigma='1e20  #v sigma/sigma0 > tolupsigma * max(eivenvals(C)**0.5) indicates "creeping behavior" with usually minor improvements',
    tolflatfitness='1  #v iterations tolerated with flat fitness before termination',
    tolfun='1e-11  #v termination criterion: tolerance in function value, quite useful',
    tolfunhist='1e-12  #v termination criterion: tolerance in function value history',
    tolfunrel='0  #v termination criterion: relative tolerance in function value: Delta f current < tolfunrel * (median0 - median_min)',
    tolstagnation='int(100 + 100 * N**1.5 / popsize)  #v termination if no improvement over tolstagnation iterations',
    tolx='1e-11  #v termination criterion: tolerance in x-changes',
    transformation='''None  # depreciated, use cma.fitness_transformations.FitnessTransformation instead.
            [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno),
            t1 is the (optional) back transformation, see class GenoPheno''',
    typical_x='None  # used with scaling_of_variables',
    updatecovwait='None  #v number of iterations without distribution update, name is subject to future changes',  # TODO: rename: iterwaitupdatedistribution?
    verbose='3  #v verbosity e.g. of initial/final message, -1 is very quiet, -9 maximally quiet, may not be fully implemented',
    verb_append='0  # initial evaluation counter, if append, do not overwrite output files',
    verb_disp='100  #v verbosity: display console output every verb_disp iteration',
    verb_filenameprefix=CMADataLogger.default_prefix + '  # output path and filenames prefix',
    verb_log='1  #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions',
    verb_log_expensive='N * (N <= 50)  # allow to execute eigendecomposition for logging every verb_log_expensive iteration, 0 or False for never',
    verb_plot='0  #v in fmin(): plot() is called every verb_plot iteration',
    verb_time='True  #v output timings on console',
    vv='{}  #? versatile set or dictionary for hacking purposes, value found in self.opts["vv"]'
):
    """use this function to get keyword completion for `CMAOptions`.

    Each keyword default above is an option's default value encoded as a
    string: the part before ``#`` is evaluated later (with ``N`` and
    ``popsize`` in scope), the part after ``#`` is its description.
    ``#v`` tags an option as "versatile" (changeable during a run).

    ``cma.CMAOptions('substr')`` provides even substring search.

    returns default options as a `dict` (not a `cma.CMAOptions` `dict`).
    """
    # `locals()` here is exactly the keyword arguments, i.e. the option table.
    return dict(locals())  # is defined before and used by CMAOptions, so it can't return CMAOptions
# Materialized default-option table (option name -> "value # description" string).
cma_default_options = cma_default_options_()  # will later be reassigned as CMAOptions(dict)
# Names of options whose default string carries the ' #v ' "versatile" tag.
cma_versatile_options = tuple(sorted(k for (k, v) in cma_default_options.items()
                                     if v.find(' #v ') > 0))
# Case-insensitive lookup: lowercased option name -> canonical option name.
cma_allowed_options_keys = dict([s.lower(), s] for s in cma_default_options)
class CMAOptions(dict):
"""a dictionary with the available options and their default values
for class `CMAEvolutionStrategy`.
``CMAOptions()`` returns a `dict` with all available options and their
default values with a comment string.
``CMAOptions('verb')`` returns a subset of recognized options that
contain 'verb' in there keyword name or (default) value or
description.
``CMAOptions(opts)`` returns the subset of recognized options in
``dict(opts)``.
Option values can be "written" in a string and, when passed to `fmin`
or `CMAEvolutionStrategy`, are evaluated using "N" and "popsize" as
known values for dimension and population size (sample size, number
of new solutions per iteration). All default option values are given
as such a string.
Details
-------
`CMAOptions` entries starting with ``tol`` are termination
"tolerances".
For `tolstagnation`, the median over the first | |
<reponame>ckyycc/hana_os_monitor_script_v2<filename>test/test_analyzer.py
from unittest import TestCase
from unittest.mock import MagicMock, call, patch
from util import MonitorUtility as Mu
from util import MonitorConst as Mc
from util import InfoType
from analyzer import DataAnalyzer
class TestAnalyzer(TestCase):
def setUp(self):
self.server_id = 1
self.check_id = "20191125010101001"
Mu.generate_check_id = MagicMock(return_value=self.check_id)
    def test_analyze_disk(self):
        """A complete disk message stream (header..ending) is aggregated per
        folder/user and forwarded as one filtered-info message."""
        disk_total, disk_free = 1234567890, 34567890
        msg_list = [{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.DISK.value, Mc.MSG_INFO:
                     {"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000}, 'folder3': {'ck3adm': 30000}},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
                  Mc.FIELD_DISK_FREE: disk_free})]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
    def test_analyze_mem(self):
        """A complete memory message stream is aggregated per user and
        forwarded as one filtered-info message."""
        mem_total, mem_free = 1000000000, 2500000
        msg_list = [{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
                     Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
                     Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
                     Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
                     Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
                  Mc.FIELD_MEM_FREE: mem_free})]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
    def test_analyze_cpu(self):
        """A complete CPU message stream is aggregated per user and
        forwarded as one filtered-info message."""
        num, usage = 512, 78
        msg_list = [{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
                     Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
                     Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
                     Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
                     Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
                  Mc.FIELD_CPU_UTILIZATION: usage})]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
    def test_analyze_instance(self):
        """A complete instance message stream is keyed by SID and forwarded
        as one filtered-info message."""
        msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
                     Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
                     Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
                     Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
                     Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.MSG_INFO:
                     {"CK1": {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
                              Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                              Mc.FIELD_SERVER_ID: self.server_id},
                      "CK2": {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
                              Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
                              Mc.FIELD_SERVER_ID: self.server_id},
                      "CK3": {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
                              Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
                              Mc.FIELD_SERVER_ID: self.server_id}},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id})]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
    def test_analyze_mix(self):
        """Interleaved CPU/memory/disk/instance streams are demultiplexed by
        message type; one filtered-info message is sent per type, in the
        order each type's ENDING marker arrives (CPU, disk, instance, memory)."""
        num, usage = 512, 78
        mem_total, mem_free = 1000000000, 2500000
        disk_total, disk_free = 1234567890, 34567890
        msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
                     Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
                     Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
                     Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
                     Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
                     Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
                     Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
                     Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
                     Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
                     Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
                     Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
                     Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
                     Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
                  Mc.FIELD_CPU_UTILIZATION: usage}),
            call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.DISK.value,
                                          Mc.MSG_INFO: {"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000},
                                                        'folder3': {'ck3adm': 30000}}, Mc.FIELD_CHECK_ID: self.check_id,
                                          Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
                                          Mc.FIELD_DISK_FREE: disk_free}),
            call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.MSG_INFO: {
                "CK1": {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
                        Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                        Mc.FIELD_SERVER_ID: self.server_id},
                "CK2": {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
                        Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
                        Mc.FIELD_SERVER_ID: self.server_id},
                "CK3": {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
                        Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
                        Mc.FIELD_SERVER_ID: self.server_id}}, Mc.FIELD_CHECK_ID: self.check_id,
                                          Mc.FIELD_SERVER_ID: self.server_id}),
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
                  Mc.FIELD_MEM_FREE: mem_free})
        ]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
    def test_analyze_mix_abandon_if_no_ending(self):
        """Same interleaved stream as test_analyze_mix but without the
        instance ENDING marker: CPU/disk/memory are still forwarded while
        the incomplete instance group must be abandoned (never sent)."""
        num, usage = 512, 78
        mem_total, mem_free = 1000000000, 2500000
        disk_total, disk_free = 1234567890, 34567890
        msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
                     Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
                     Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
                     Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
                     Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
                     Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
                     Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
                     Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
                    {Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
                     Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c3", Mc.FIELD_PROCESS_ID: 3001,
                     Mc.FIELD_MEM: 35, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c6", Mc.FIELD_PROCESS_ID: 2002,
                     Mc.FIELD_CPU: 28, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_PROCESS_COMMAND: "c7", Mc.FIELD_PROCESS_ID: 3002,
                     Mc.FIELD_CPU: 38, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_FOLDER: "folder1", Mc.FIELD_DISK_USAGE_KB: 10000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_FOLDER: "folder2", Mc.FIELD_DISK_USAGE_KB: 20000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.FIELD_USER_NAME: "ck3adm", Mc.FIELD_FOLDER: "folder3", Mc.FIELD_DISK_USAGE_KB: 30000,
                     Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
                    {Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.FIELD_SID: "CK3", Mc.FIELD_INSTANCE_NO: "22", Mc.FIELD_HOST: "server_3",
                     Mc.FIELD_REVISION: '2.00.044.00', Mc.FIELD_EDITION: 'Database',
                     Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
                    # remove ending of instance
                    # {Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True},
                    {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_ENDING: True}]
        mock_producer = self.__mock_analyze(msg_list)
        calls = [
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.CPU.value, Mc.MSG_INFO: {"ck1adm": 18, 'ck2adm': 28, 'ck3adm': 38},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_CPU_NUMBER: num,
                  Mc.FIELD_CPU_UTILIZATION: usage}),
            call(Mc.TOPIC_FILTERED_INFO, {Mc.MSG_TYPE: InfoType.DISK.value,
                                          Mc.MSG_INFO: {"folder1": {"ck1adm": 10000}, 'folder2': {'ck2adm': 20000},
                                                        'folder3': {'ck3adm': 30000}}, Mc.FIELD_CHECK_ID: self.check_id,
                                          Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_DISK_TOTAL: disk_total,
                                          Mc.FIELD_DISK_FREE: disk_free}),
            call(Mc.TOPIC_FILTERED_INFO,
                 {Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.MSG_INFO: {"ck1adm": 15, 'ck2adm': 25, 'ck3adm': 35},
                  Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_SERVER_ID: self.server_id, Mc.FIELD_MEM_TOTAL: mem_total,
                  Mc.FIELD_MEM_FREE: mem_free})
        ]
        mock_producer.return_value.send.assert_has_calls(calls, any_order=False)  # should be sequential
def test_analyze_mix_abandon_first_msg_if_second_comes_before_ending_of_first_one(self):
num, usage = 512, 78
mem_total, mem_free = 1000000000, 2500000
disk_total, disk_free = 1234567890, 34567890
msg_list = [{Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.MSG_TYPE: InfoType.INSTANCE.value,
Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK1", Mc.FIELD_INSTANCE_NO: "00", Mc.FIELD_HOST: "server_1",
Mc.FIELD_REVISION: '1.00.122.25', Mc.FIELD_EDITION: 'Database',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_CPU_NUMBER: num, Mc.FIELD_CPU_UTILIZATION: usage,
Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c5", Mc.FIELD_PROCESS_ID: 1002,
Mc.FIELD_CPU: 18, Mc.MSG_TYPE: InfoType.CPU.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_SID: "CK2", Mc.FIELD_INSTANCE_NO: "12", Mc.FIELD_HOST: "server_2",
Mc.FIELD_REVISION: '2.00.033.00', Mc.FIELD_EDITION: 'Cockpit',
Mc.MSG_TYPE: InfoType.INSTANCE.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_MEM_TOTAL: mem_total, Mc.FIELD_MEM_FREE: mem_free,
Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck1adm", Mc.FIELD_PROCESS_COMMAND: "c1", Mc.FIELD_PROCESS_ID: 1001,
Mc.FIELD_MEM: 15, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id, Mc.MSG_HEADER: True},
{Mc.FIELD_CHECK_ID: self.check_id, Mc.FIELD_DISK_TOTAL: disk_total, Mc.FIELD_DISK_FREE: disk_free,
Mc.MSG_TYPE: InfoType.DISK.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: "ck2adm", Mc.FIELD_PROCESS_COMMAND: "c2", Mc.FIELD_PROCESS_ID: 2001,
Mc.FIELD_MEM: 25, Mc.MSG_TYPE: InfoType.MEMORY.value, Mc.FIELD_SERVER_ID: self.server_id},
{Mc.FIELD_USER_NAME: | |
<reponame>AnyOfYou/pyftpsync
# -*- coding: utf-8 -*-
"""
(c) 2012-2021 <NAME>; see https://github.com/mar10/pyftpsync
Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php
"""
import configparser
import getpass
import logging
import netrc
import os
import sys
from datetime import datetime
_logger = None
PYTHON_VERSION = "{}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]
)
# def is_basestring(s):
# """Return True for any string type, i.e. for str/unicode on Py2 and bytes/str on Py3."""
# return isinstance(s, (str, bytes))
# def is_bytes(s):
# """Return True for bytestrings, i.e. for str on Py2 and bytes on Py3."""
# return isinstance(s, bytes)
def is_native(s):
    """Return True for native strings, i.e. for str on Py2 and Py3."""
    return isinstance(s, str)
# def is_unicode(s):
# """Return True for unicode strings, i.e. for unicode on Py2 and str on Py3."""
# return isinstance(s, str)
def to_bytes(s, encoding="utf-8"):
    """Convert a text string (unicode) to bytestring, i.e. str on Py2 and bytes on Py3.

    Bytes input (including bytes subclasses) is returned unchanged.
    """
    # isinstance (not `type(s) is bytes`) so bytes subclasses pass through;
    # the old check fed them into `bytes(s, encoding)`, which raises
    # TypeError for non-str arguments.
    if not isinstance(s, bytes):
        s = bytes(s, encoding)
    return s
def to_native(s, encoding="utf-8"):
    """Convert data to native str type, i.e. bytestring on Py2 and unicode on Py3."""
    # isinstance (not `type(s) is ...`) so bytes subclasses are decoded;
    # the old check sent them through `str(s)`, yielding the "b'...'" repr.
    if isinstance(s, bytes):
        return str(s, encoding)
    if isinstance(s, str):
        return s
    return str(s)
# On Python 3 `to_native` already yields unicode text, so an alias suffices.
to_unicode = to_native
"""Convert binary data to unicode (text strings) on Python 2 and 3."""
# Binary Strings
# b_empty = to_bytes("")
# b_slash = to_bytes("/")
def get_pyftpsync_logger():
    """Return the logger currently used for common output (may be None)."""
    return _logger
def set_pyftpsync_logger(logger=True):
    """Define target for common output.

    Args:
        logger (bool | None | logging.Logger):
            Pass None to use `print()` to stdout instead of logging.
            Pass True to create a simple standard logger.
    Returns:
        The previously installed logger (or None).
    """
    global _logger
    previous = _logger
    if logger is True:
        # Build the default "pyftpsync" logger, emitting DEBUG and above.
        logging.basicConfig(level=logging.INFO)
        default_logger = logging.getLogger("pyftpsync")
        default_logger.setLevel(logging.DEBUG)
        _logger = default_logger
    else:
        _logger = logger
    return previous
# Install the default logger at import time so `write()` works out of the box.
set_pyftpsync_logger(True)
def write(*args, **kwargs):
    """Redirectable wrapper for print statements.

    Keyword-only flags `debug` and `warning` select the log level when a
    logger is installed; without a logger, plain `print()` is used.
    """
    debug = kwargs.pop("debug", None)
    warning = kwargs.pop("warning", None)
    if not _logger:
        print(*args, **kwargs)
        return
    # Logging handlers do not understand print()-style keywords.
    kwargs.pop("end", None)
    kwargs.pop("file", None)
    if debug:
        _logger.debug(*args, **kwargs)
    elif warning:
        _logger.warning(*args, **kwargs)
    else:
        _logger.info(*args, **kwargs)
def write_error(*args, **kwargs):
    """Redirectable wrapper for print sys.stderr statements."""
    if _logger:
        # Logging handlers do not understand print()-style keywords.
        kwargs.pop("end", None)
        kwargs.pop("file", None)
        _logger.error(*args, **kwargs)
        return
    print(*args, file=sys.stderr, **kwargs)
# colorama is optional: without it, output is plain (uncolored) text.
try:
    import colorama  # provide color codes, ...
    colorama.init()  # improve color handling on windows terminals
except ImportError:
    write_error(
        "Unable to import 'colorama' library: Colored output is not available. "
        "Try `pip install colorama`."
    )
    colorama = None
def check_cli_verbose(default=3):
    """Check for presence of `--verbose`/`--quiet` or `-v`/`-q` without using argparse."""
    cli_args = sys.argv[1:]
    level = default + cli_args.count("--verbose") - cli_args.count("--quiet")
    # Short options may be bundled, e.g. `-vvq`.
    for token in cli_args:
        if token.startswith("-") and not token.startswith("--"):
            level += token[1:].count("v")
            level -= token[1:].count("q")
    return level
# keyring is optional: without it, passwords cannot be stored/retrieved.
try:
    # prevent loading messages
    if check_cli_verbose() <= 3:
        logging.getLogger("keyring.backend").setLevel(logging.WARNING)
    import keyring
except ImportError:
    write_error(
        "Unable to import 'keyring' library: Storage of passwords is not available. "
        "Try `pip install keyring`."
    )
    keyring = None
#: Name of the keyring entry used to store credentials.
DEFAULT_CREDENTIAL_STORE = "pyftpsync.pw"
#: Prefix prepended to messages when running in dry-run mode.
DRY_RUN_PREFIX = "(DRY-RUN) "
#: True when stdin and stdout are not the same stream (output piped/redirected).
IS_REDIRECTED = os.fstat(0) != os.fstat(1)
# DEFAULT_BLOCKSIZE = 8 * 1024
#: ANSI escape sequence that erases the current terminal line.
VT_ERASE_LINE = "\x1b[2K"
#: Enable additional logging. Supported values: 'classify', 'match'
#: (also requires verbosity >= 4).
DEBUG_FLAGS = set()
def namespace_to_dict(o):
    """Convert an argparse namespace object to a dictionary."""
    # Keep plain attribute values only; skip bound methods and other callables.
    return {key: value for key, value in o.__dict__.items() if not callable(value)}
# def dict_get_ci(d, key, default=None):
# """Lookup dict value case insensitive.
# Returns:
# tuple (matched_key, value, is_exact_match)
# """
# if key in d:
# return key, d[key], True
# lk = key.lower()
# for k, v in d.items():
# if k.lower() == lk:
# return k, v, False
# return key, default, False
def eps_compare(f1, f2, eps):
    """Three-way compare two floats with tolerance `eps`.

    Args:
        f1, f2: values to compare.
        eps: non-negative tolerance; differences within `eps` count as equal.
    Returns:
        0 if ``abs(f1 - f2) <= eps``, -1 if ``f1 < f2``, 1 if ``f1 > f2``.
    """
    res = f1 - f2
    if abs(res) <= eps:  # '<=', so eps == 0 works as expected
        return 0
    elif res < 0:
        return -1
    return 1
def pretty_stamp(stamp):
    """Convert timestamp to verbose string (strip fractions of seconds)."""
    if stamp is None:
        return "n.a."
    # Local-time representation, second precision.
    dt = datetime.fromtimestamp(stamp)
    return dt.strftime("%Y-%m-%d %H:%M:%S")
# Module-wide parser for the user's ~/.pyftpsyncrc configuration file.
# RawConfigParser.read() silently ignores a missing file.
_pyftpsyncrc_parser = configparser.RawConfigParser()
_pyftpsyncrc_parser.read(os.path.expanduser("~/.pyftpsyncrc"))
def get_option(env_name, section, opt_name, default=None):
    """Return a configuration setting from environment var or .pyftpsyncrc"""
    # Environment variables take precedence over the config file.
    val = os.environ.get(env_name)
    if val is not None:
        return val
    try:
        return _pyftpsyncrc_parser.get(section, opt_name)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default
# ===============================================================================
#
# ===============================================================================
def prompt_for_password(url, user=None, default_user=None):
    """Prompt for username and password.

    If a user name is passed, only prompt for a password.

    Args:
        url (str): hostname
        user (str, optional):
            Pass a valid name to skip prompting for a user name
        default_user (str, optional):
            Pass a valid name that is used as default when prompting
            for a user name
    Raises:
        KeyboardInterrupt if user hits Ctrl-C
    Returns:
        (username, password) or None
    """
    if user is None:
        default_user = default_user or getpass.getuser()
        # Re-prompt until a non-empty user name (or the default) is accepted.
        while user is None:
            user = input("Enter username for {} [{}]: ".format(url, default_user))
            if user.strip() == "" and default_user:
                user = default_user
    if user:
        pw = getpass.getpass(
            "Enter password for {}@{} (Ctrl+C to abort): ".format(user, url)
        )
        # NOTE(review): getpass always returns a str, so this condition is
        # always true -- an empty password is accepted deliberately.
        if pw or pw == "":
            return (user, pw)
    return None
def get_credentials_for_url(url, opts, force_user=None):
    """Lookup credentials for a given target in keyring and .netrc.

    Optionally prompts for credentials if not found.

    Lookup order: 1. keyring database, 2. .netrc file, 3. interactive prompt.

    Args:
        url (str): target URL (without username or password parts)
        opts (dict): reads `verbose`, `prompt`, `no_prompt`, `no_keyring`,
            `no_netrc`
        force_user (str, optional) username to be used instead of prompting
    Returns:
        2-tuple (username, password) or None
    """
    assert "@" not in url
    creds = None
    verbose = int(opts.get("verbose", 3))
    force_prompt = opts.get("prompt", False)
    # NOTE(review): `no_prompt` defaults to True, i.e. prompting is opt-in --
    # confirm this matches the CLI flag defaults.
    allow_prompt = not opts.get("no_prompt", True)
    # A forced user name cannot be matched against stored credentials.
    allow_keyring = not opts.get("no_keyring", False) and not force_user
    allow_netrc = not opts.get("no_netrc", False) and not force_user
    # print("get_credentials_for_url", url, force_user, allow_prompt)
    if force_user and not allow_prompt:
        raise RuntimeError(
            "Cannot get credentials for a distinct user ({}) from keyring or .netrc and "
            "prompting is disabled.".format(force_user)
        )
    # Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x
    home_path = os.path.expanduser("~")
    file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE)
    if os.path.isfile(file_path):
        raise RuntimeError(
            "Custom password files are no longer supported. Delete {} and use .netrc instead.".format(
                file_path
            )
        )
    # 1. Try keyring database
    if creds is None and keyring and allow_keyring and not force_prompt:
        try:
            # Note: we pass the url as `username` and username:password as `password`
            c = keyring.get_password("pyftpsync", url)
            if c is not None:
                # Split only on the first ':' -- the password may contain more.
                creds = c.split(":", 1)
                write(
                    "Using credentials from keyring('pyftpsync', '{}'): {}:***.".format(
                        url, creds[0]
                    )
                )
            else:
                if verbose >= 4:
                    write(
                        "No credentials found in keyring('pyftpsync', '{}').".format(
                            url
                        )
                    )
        # except keyring.errors.TransientKeyringError:
        except Exception as e:
            # e.g. user clicked 'no'
            write_error("Could not get password from keyring {}".format(e))
    # 2. Try .netrc file
    if creds is None and allow_netrc and not force_prompt:
        try:
            authenticators = None
            authenticators = netrc.netrc().authenticators(url)
        except FileNotFoundError:
            if verbose >= 4:
                write("Could not get password (no .netrc file).")
        except Exception as e:
            write_error("Could not read .netrc: {}.".format(e))
        if authenticators:
            # .netrc triple is (login, account, password).
            creds = (authenticators[0], authenticators[2])
            write("Using credentials from .netrc file: {}:***.".format(creds[0]))
        else:
            if verbose >= 4:
                write("Could not find entry for '{}' in .netrc file.".format(url))
    # 3. Prompt for password if we don't have credentials yet, or --prompt was set.
    # (The force_prompt case is handled separately below so a default user
    # name can be offered in the prompt.)
    if creds is None and allow_prompt and not force_prompt:
        creds = prompt_for_password(url, user=force_user)
    if force_prompt:
        # --prompt was set but we can provide a default for the user name
        assert not creds
        creds = prompt_for_password(url, default_user=force_user)
        # creds = prompt_for_password(url, default_user=creds[0])
    return creds
def save_password(url, username, password):
    """Store credentials in keyring."""
    if not keyring:
        write("Could not store credentials (missing keyring support).")
        return
    # Credentials are stored as "username:password", so ':' in the user
    # name would make the stored value ambiguous.
    if ":" in username:
        raise RuntimeError(
            "Unable to store credentials if username contains a ':' ({}).".format(
                username
            )
        )
    try:
        # Note: we pass the url as `username` and username:password as `password`
        if password is None:
            keyring.delete_password("pyftpsync", url)
            write("Delete credentials from keyring ({})".format(url))
        else:
            secret = "{}:{}".format(username, password)
            keyring.set_password("pyftpsync", url, secret)
            write(
                "Store credentials in keyring ({}, {}:***).".format(url, username)
            )
    # except keyring.errors.TransientKeyringError:
    except Exception as e:
        # e.g. user clicked 'no'
        write("Could not delete/set password {}.".format(e))
def str_to_bool(val):
    """Return a boolean for '0', 'false', 'on', ... (case-insensitive).

    Args:
        val: any value; it is converted with str() first.
    Returns:
        True for "1", "true", "on", "yes"; False for "0", "false", "off", "no".
    Raises:
        ValueError: for any other value.
    """
    val = str(val).lower().strip()
    if val in ("1", "true", "on", "yes"):
        return True
    elif val in ("0", "false", "off", "no"):
        return False
    # Fixed message: a space was missing between the value and "(expected ...)".
    raise ValueError(
        "Invalid value '{}' "
        "(expected '1', '0', 'true', 'false', 'on', 'off', 'yes', 'no').".format(val)
    )
def ansi_code(name):
    """Return ansi color or style codes or '' if colorama is not available."""
    # Resolve a dotted name like "Fore.RED" against the colorama module.
    # When colorama is None (not installed), getattr raises AttributeError
    # on the first component and we fall back to the empty string.
    obj = colorama
    try:
        for attr in name.split("."):
            obj = getattr(obj, attr)
    except AttributeError:
        return ""
    return obj
def byte_compare(stream_a, stream_b):
"""Byte compare two files (early out on first difference).
Returns:
| |
<gh_stars>0
#!/usr/bin/env python
# --------------------------------------------------------
# Copyright (c) 2018 VidTeq
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import datetime
import errno
import json
import os
import os.path as osp
import time

import matplotlib.pyplot as plt
import numpy as np
class NumpyEncoder(json.JSONEncoder):
    """Special json encoder for numpy types

    Uses the abstract scalar base classes (`np.integer`, `np.floating`,
    `np.bool_`) so every concrete width (int8..uint64, float16..float64)
    is covered -- including on NumPy 2.x, where aliases such as
    `np.float_` no longer exist.

    Ref:
    https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
    """
    def default(self, obj):
        """Convert numpy scalars and arrays to plain Python objects."""
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        # Fall back to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
def numpy_to_json(json_input):
    """Serialize `json_input` to a JSON string, converting numpy types on the way."""
    return json.dumps(json_input, cls=NumpyEncoder)
## file_name = "mask_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
'''
`mkdir -p` linux command functionality
References:
* https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
'''
def mkdir_p(path):
    """`mkdir -p` functionality: create `path` incl. parents, ignore if it exists.

    References:
        * https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Swallow only "already exists as a directory"; re-raise anything else.
        # (Fix: `errno` was referenced here but never imported -> NameError.)
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def get(module, name):
    """Import `module` and return its attribute `name` (warn if falsy)."""
    from importlib import import_module
    mod = import_module(module)
    fn = getattr(mod, name) if mod else None
    if not fn:
        print("_get: function is not defined in the module!")
    return fn
def load_module(module):
    """Import and return the module named `module`."""
    from importlib import import_module
    return import_module(module)
def getDnnModule(args):
    """Build the dotted module path for the configured DNN architecture.

    e.g. DNNARCH "mask_rcnn" -> "pixel.mask_rcnn"
    """
    return "pixel." + args.DNNARCH
## TBD: error handling
def yaml_load(fileName):
import yaml
from easydict import EasyDict as edict
fc = None
with open(fileName, 'r') as f:
# fc = edict(yaml.load(f))
fc = edict(yaml.safe_load(f))
return fc
def getHeader(fileName, delimiter):
    """Return the first line of `fileName` split on `delimiter` (None if empty)."""
    with open(fileName, 'r') as f:
        first = f.readline()
    if not first:
        return None
    return first.rstrip('\n').split(delimiter)
def readLine(fileName, delimiter, skipHeader):
    """Yield each line of `fileName` split on `delimiter`, optionally skipping row 1."""
    with open(fileName, 'r') as f:
        rows = iter(f)
        if skipHeader:
            next(rows)  # skip the header row
        for line in rows:
            yield line.rstrip('\n').split(delimiter)
## Usage: list( getOnlyFilesInDir(path) )
def getOnlyFilesInDir(path):
    """Yield the names of regular files (not directories) directly inside `path`."""
    for entry in os.listdir(path):
        if osp.isfile(osp.join(path, entry)):
            yield entry
def createResponseForVisionAPI(im_name, FD, __appcfg, all_rows_for_all_classes, detections, baseUrl):
    """Assemble the JSON-serializable response dict for the vision API.

    `FD` and `detections` are currently unused (detections-URL code is
    kept below for reference).
    """
    fname = osp.basename(im_name)
    app_id = __appcfg.ID
    print("createResponseForVisionAPI::ID: {}".format(app_id))
    # for url in detections:
    #     apiUrl = baseUrl + "/detections/" + url
    #     apiDetectionsUrl.append(apiUrl)
    return {
        "name": fname,
        "api": {
            "uploads": baseUrl + "/uploads/" + fname,
            # "detections": apiDetectionsUrl,
        },
        "type": app_id,
        # "bboxfields": hFields,
        "result": all_rows_for_all_classes,
    }
## Ref::
## https://stackoverflow.com/questions/17153978/flask-error-handling-response-object-is-not-iterable
## not used now as using from flask import jsonify
# def getResponse(data, res_code, mimetype):
# # print("getResponse:")
# # print(data)
# from flask import Response
# res = Response(response=data, status=res_code, mimetype=mimetype)
# # print("data, res_code, mimetype: {} {} {}".format(data, res_code, mimetype))
# if mimetype=="application/xml":
# res.headers["Content-Type"] = "text/xml; charset=utf-8"
# if mimetype=="application/json":
# res.headers["Content-Type"] = "text/json; charset=utf-8"
# # print("res.headers: {}".format(res.headers))
# # print("--------------")
# return res
def getOutFileRow(bbox, label, score, width, height, FD):
    """Format one detection as an FD-delimited row.

    `bbox` follows the mask_rcnn convention [y1, x1, y2, x2]
    (= [top, left, bottom, right]); an empty bbox yields 'null' fields.
    Columns: label, score, image_width, image_height, left, top, box width,
    box height.
    """
    print("getOutFileRow::bbox: {}".format(bbox))
    if len(bbox) > 0:
        top, left, bottom, right = bbox[0], bbox[1], bbox[2], bbox[3]
        fields = [label, score, width, height, left, top, right - left, bottom - top]
    else:
        fields = [label, "null", width, height, "null", "null", "null", "null"]
    return FD.join(str(v) for v in fields)
def getOutFileHeader(FD):
    """Return the FD-delimited header matching getOutFileRow's columns."""
    columns = ('label', 'score', 'image_width', 'image_height',
               'left_x', 'top_y', 'width_w', 'height_h')
    return FD.join(columns)
## TBD: some issue, review the logic again for the standalone app execution
def getOutFileName(path, im_name, ext, __appcfg):
    """Build the output file name for `im_name` under `path`.

    `path` may be a directory (used as-is) or a file path (its directory is
    used). For ".csv" output, the app ID and release number are prefixed.
    """
    fname = osp.basename(im_name)
    fpath = path if osp.isdir(path) else osp.dirname(path)
    if ext == ".csv":
        fname = "-".join([__appcfg.ID, __appcfg.REL_NUM, fname])
        # fname = fname+"-{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
    fileName = osp.join(fpath, fname + ext)
    print("getOutFileName:fpath, fname, fileName: {}, {}, {}".format(fpath, fname, fileName))
    return fileName
def getVizImageFileName(im_name, vizType, __appcfg ):
    """Return the visualization image file name for `im_name`.

    `vizType` is one of: viz, splash, mask, or None; None returns the
    plain base name, otherwise "<name>-<ID>-<REL_NUM>-<vizType>.png".
    """
    fname = osp.basename(im_name)
    if vizType is None:
        return fname
    parts = (fname, __appcfg.ID, __appcfg.REL_NUM, vizType)
    return "-".join(parts) + ".png"
def vis_detections_from_csvfile(im_name, delimiter, path, out_file, __appcfg):
    """Draw the detections stored in the per-image CSV onto the image and save it.

    Reads the CSV produced for `im_name` (if it exists), draws every row with
    a non-'null' score as a labeled bounding box using matplotlib, and saves
    the annotated image via `getOutFileName`.

    Args:
        im_name: image file name; also used to locate the CSV result file.
        delimiter: field delimiter of the CSV file.
        path: directory containing the input image.
        out_file: output base path handed to getOutFileName.
        __appcfg: app config; uses ID, REL_NUM and SAVE_NULL_RESULTS.

    Returns:
        bool: True if at least one detection row was drawn.
    """
    t3 = time.time()
    print("vis_detections_from_csvfile")
    fileName = getOutFileName(out_file, im_name, ".csv", __appcfg)
    print("fileName:")
    print(fileName)
    print("path:")
    print(path)
    print("im_name:")
    print(im_name)
    # print("__appcfg: {}".format(__appcfg))
    detections = False
    if os.path.exists(fileName):
        # Map column name -> index from the CSV header row.
        header = getHeader(fileName, delimiter)
        hFields = { header[i]:i for i in range(0,len(header), 1) }
        imgFileName = osp.join(path, im_name)
        print(imgFileName)
        ## read image
        im = plt.imread(imgFileName)
        # print(im)
        dim = im.shape[:2]
        # print(dim)
        H, W = dim[0], dim[1]
        # Size the figure so one image pixel maps to one figure pixel.
        dpi = 80
        figsize = W/float(dpi), H/float(dpi)
        # print(figsize)
        # NOTE(review): the figure created by plt.subplots() is immediately
        # replaced by plt.figure() and never closed -> leaks one figure per call.
        fig, ax = plt.subplots(figsize=figsize)
        fig = plt.figure(figsize=figsize)
        # Axes covering the whole figure, no margins.
        ax = fig.add_axes([0.,0,1,1])
        ax.axis('off')
        # ax.imshow(im, aspect='equal')
        # ax.imshow(im.astype(np.uint8))
        ax.imshow(im)
        ## read and create annotation for every data row (header skipped)
        for row in readLine(fileName, delimiter, True):
            print("row:")
            print(row)
            if len(row) < 1:
                continue
            # Rows with a 'null' score are "no detection" placeholders.
            if row[ hFields["score"] ] == 'null':
                continue
            detections = True
            print("hFields:")
            print(hFields)
            image_width = float(row[ hFields["image_width"] ])
            image_height = float(row[ hFields["image_height"] ])
            left = float(row[ hFields["left_x"] ])
            top = float(row[ hFields["top_y"] ])
            width = float(row[ hFields["width_w"] ])
            height = float(row[ hFields["height_h"] ])
            label = row[ hFields["label"] ]
            score = row[ hFields["score"] ]
            # print(image_width)
            # print(image_height)
            # print(left)
            # print(top)
            # print(width)
            # print(height)
            # print(label)
            # print(score)
            txtLabel = label +':\n'+score
            ## add bbox
            ax.add_patch(plt.Rectangle((left,top), width, height, fill=False, edgecolor="red", linewidth=1.5))
            ## label
            # ax.text(left, top, txtLabel, bbox=dict(facecolor='blue', alpha=0.5), fontsize=14, color='white')
            ## label with fancy arrow
            ax.annotate(txtLabel, xy=(left, top), xytext=(left - 50, top - 50),
                        color='white', size=16, ha='right', bbox=dict(facecolor='blue', alpha=0.5),
                        arrowprops=dict(arrowstyle='fancy', fc='cyan', ec='none'))
        try:
            if detections or __appcfg.SAVE_NULL_RESULTS:
                ## Save Annotated Image
                # ax.set(xlim=[0,W], ylim=[H,0], aspect=1)
                # pltname = osp.basename(fileName)
                pltname = im_name
                # Empty ext/appcfg: reuse the image's own name for the output.
                pltfilename = getOutFileName(out_file, pltname, "", "")
                # plt.axis('off')
                plt.draw()
                plt.savefig(pltfilename, dpi=dpi, transparent=False, bbox_inches='tight')
                print("File saved: "+pltfilename)
            ## TBD: in case of no detections, put all the images and csv files in a separate directory for easy access
            # if not detections and not __appcfg.SAVE_NULL_RESULTS:
            #     print("no detection: hence will be deleted!")
            #     print(fileName)
            #     os.path.exists(fileName) and os.remove(fileName)
        except Exception as e:
            print("Error: ")
            print(e)
        finally:
            plt.close()
    t4 = time.time()
    print("====>: Time taken for vis_detections_from_csvfile: ")
    print(t4 - t3)
    return detections
def vis_annotations(out_file, fileName, left, top, width, height, label):
    """Render `fileName` at native resolution and save it via getOutFileName.

    NOTE(review): the annotation parameters (left/top/width/height/label) are
    currently unused -- the "add annotations" step is not implemented yet.
    """
    # read image
    im = plt.imread(fileName)
    # print(im)
    dim = im.shape[:2]
    # print(dim)
    H, W = dim[0], dim[1]
    # Size the figure so one image pixel maps to one figure pixel.
    dpi = 80
    figsize = W/float(dpi), H/float(dpi)
    # print(figsize)
    fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, aspect='equal')
    ## add annotations
    plt.axis('off')
    plt.draw()
    # ax.set(xlim=[0,W], ylim=[H,0], aspect=1)
    pltname = osp.basename(fileName)
    # Empty ext/appcfg: reuse the image's own base name for the output file.
    pltfilename = getOutFileName(out_file, pltname, "", "")
    try:
        plt.savefig(pltfilename, dpi=dpi, transparent=False, bbox_inches='tight')
    except Exception as e:
        print("Error: ")
        print(e)
    finally:
        # Always release the figure to avoid leaking memory across calls.
        plt.close()
    print("File saved: "+pltfilename)
def delete_no_detection_csvfile(im_name, delimiter, path, out_file, __appcfg):
    """Remove the CSV result file for `im_name` when it holds no detections."""
    print("delete_no_detection_csvfile")
    fileName = getOutFileName(out_file, im_name, ".csv", __appcfg)
    # Any data row after the header counts as a detection.
    rows = readLine(fileName, delimiter, True)
    detections = next(rows, None) is not None
    if detections:
        print("Found detections:")
    else:
        try:
            ## TBD: in case of no detections, put all the images and csv files in a separate directory for easy access
            if not __appcfg.SAVE_NULL_RESULTS:
                print("no detection: hence will be deleted!")
                print(fileName)
                osp.exists(fileName) and os.remove(fileName)
        except Exception as e:
            print("Error: ")
            print(e)
    return detections
## TBD:
## 1. path as URL (http, https)
## 2. path with remote protocol access: ssh/sftp/ftp/smb path
## 3. Read textfile with the complete path of image,
## instead of taking textfile path as the base path for the images
def getImageAndPathDtls(args):
    """Resolve `args.PATH` into a base directory plus a list of image names.

    Accepts: a directory (all regular files inside it), a text file listing
    image names (first FILE_DELIMITER-separated field per non-empty line),
    or a single image file.

    NOTE(review): `args` is accessed both as attribute (`args.PATH`) and as
    mapping (`args['ALLOWED_FILE_TYPE']`) -- presumably an EasyDict, which
    supports both; confirm against callers.

    Returns:
        dict: {"path": <base dir>, "images": [<file name>, ...]}
    """
    import numpy as np
    dtls = {
        "path":""
        ,"images":[]
    }
    path = args.PATH
    if osp.isdir(path):
        dtls["path"] = path
        # dtls["images"] = sorted(os.listdir(path)) ## this will contain the directories also
        dtls["images"] = list( getOnlyFilesInDir(path) )
    else:
        if osp.isfile(path):
            fn, ext = osp.splitext(osp.basename(path))
            print("fn, ext: {} {}".format(fn,ext))
            dtls["path"] = osp.dirname(path)
            if ext.lower() in args['ALLOWED_FILE_TYPE']:
                # it is a file containing image names
                with open(path,'r') as f:
                    data = f.read()
                ## Ref: https://stackoverflow.com/questions/1140958/whats-a-quick-one-liner-to-remove-empty-lines-from-a-python-string
                gen = (i.split(args['FILE_DELIMITER'])[0] for i in data.split('\n') if i.strip("\r\n") ) # this works even if i=['100818_144130_16718_zed_l_938.jpg']
                # De-duplicate and convert back to a plain Python list.
                dtls["images"] = np.unique( list(gen) ).tolist()
            elif ext.lower() in args['ALLOWED_IMAGE_TYPE']:
                # it is a single image file
                dtls["images"] = [ osp.basename(path) ] ## convert to list
    return dtls;
## Ref:
## https://stackoverflow.com/questions/3853722/python-argparse-how-to-insert-newline-in-the-help-text
def parse_args_for_predict(cfg, msg=''):
import argparse
from argparse import RawTextHelpFormatter
"""Parse input arguments."""
parser = argparse.ArgumentParser(
description='DNN Application Framework - Prediction.\n * Refer: `dnncfg.yml` for model configuration details.\n * Refer: `paths.yml` environment and paths configurations.\n\n' + | |
in path_instance
assert path_instance not in Path('absolute/dummy')
def test_invalid_types(self, make_path):
    """Adding non-path operands to a Path raises TypeError on either side."""
    path_left = Path(make_path('dummy/path/left'))
    for operand in (1, (1,), [1], 0, []):
        with pytest.raises(TypeError):
            path_left + operand
        with pytest.raises(TypeError):
            operand + path_left
def test_addition_dir(self, make_path):
    """Path + Path joins directory paths with a '/' separator."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert Path(lhs) + Path(rhs) == str(lhs) + '/' + str(rhs)
def test_raddition_dir(self, make_path):
    """Joining in the reversed operand order yields right-then-left."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert Path(rhs) + Path(lhs) == str(rhs) + '/' + str(lhs)
def test_str_addition_dir(self, make_path):
    """Path + raw (non-Path) right operand joins like Path + Path."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert Path(lhs) + rhs == str(lhs) + '/' + str(rhs)
def test_str_raddition_dir(self, make_path):
    """Raw left operand + Path joins like Path + Path."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert lhs + Path(rhs) == str(lhs) + '/' + str(rhs)
def test_non_commutative_addition_dir(self, make_path):
    """Join order matters: a + b differs from b + a."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert Path(rhs) + Path(lhs) != Path(lhs) + Path(rhs)
def test_str_non_commutative_addition_dir(self, make_path):
    """Non-commutativity also holds with a raw right operand."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert Path(lhs) + rhs != Path(rhs) + lhs
def test_str_non_commutative_raddition_dir(self, make_path):
    """Non-commutativity also holds with a raw left operand."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right')
    assert lhs + Path(rhs) != rhs + Path(lhs)
def test_addition_file(self, make_path):
    """A file path may appear as the right-hand operand of a join."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    assert Path(lhs) + Path(rhs) == str(lhs) + '/' + str(rhs)
def test_raddition_file(self, make_path):
    """Joining anything onto a file path (left side) raises ValueError."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    with pytest.raises(ValueError, match='It is impossible to left join a file-path: '):
        Path(rhs) + Path(lhs)
def test_str_addition_file(self, make_path):
    """A raw file path as right operand joins like Path + Path."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    assert Path(lhs) + rhs == str(lhs) + '/' + str(rhs)
def test_str_raddition_file(self, make_path):
    """A raw dir path as left operand joins onto a file Path."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    assert lhs + Path(rhs) == str(lhs) + '/' + str(rhs)
def test_non_commutative_addition_file(self, make_path):
    """Swapping operands so the file path is on the left raises ValueError."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    with pytest.raises(ValueError, match='It is impossible to left join a file-path: '):
        Path(rhs) + Path(lhs)
def test_str_non_commutative_addition_file(self, make_path):
    """A file Path on the left rejects a raw right operand as well."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    with pytest.raises(ValueError, match='It is impossible to left join a file-path: '):
        Path(rhs) + lhs
def test_str_non_commutative_raddition_file(self, make_path):
    """A raw file path on the left also rejects joining onto a Path."""
    lhs = make_path('dummy/path/left')
    rhs = make_path('dummy/path/right.ext')
    with pytest.raises(ValueError, match='It is impossible to left join a file-path: '):
        rhs + Path(lhs)
def test_associativity_absolute(self, make_path):
    """An absolute segment in a chain discards everything joined before it."""
    rel = make_path('dummy')
    absolute = make_path('/dummy')
    result = Path(rel) + Path(rel) + Path(absolute) + Path(rel)
    assert result == str(absolute) + '/' + str(rel)
def test_associativity(self, make_path):
    """Chained joins of relative segments concatenate left to right."""
    p = make_path('dummy')
    expected = '/'.join([str(p)] * 4)
    assert Path(p) + Path(p) + Path(p) + Path(p) == expected
def test_str_associativity(self, make_path):
    """Mixing raw and Path operands anywhere in the chain gives one result."""
    p = make_path('dummy')
    expected = '/'.join([str(p)] * 4)
    # Raw operand in each of the four positions.
    assert p + Path(p) + Path(p) + Path(p) == expected
    assert Path(p) + p + Path(p) + Path(p) == expected
    assert Path(p) + Path(p) + p + Path(p) == expected
    assert Path(p) + Path(p) + Path(p) + p == expected
    # Mostly-raw chains and explicit grouping.
    assert Path(p) + p + p + p == expected
    assert p + Path(p) + p + p == expected
    assert p + (p + Path(p)) + p == expected
    assert p + (p + (p + Path(p))) == expected
class TestPathPrefix(object):
    """Tests for Path.common_prefix, anchor_to_path and root_to_anchor."""

    def test_common_prefix(self, make_path):
        """common_prefix returns the shared leading path; inputs with no
        shared prefix (relative vs. absolute) raise ValueError."""
        path = Path(make_path('/some/absolute/path/to/somewhere.ext'))
        assert path.common_prefix('/some/absolute/path/to/elsewhere.ext') == Path('/some/absolute/path/to')
        with pytest.raises(ValueError, match='No common prefix found between'):
            path.common_prefix('path/to')
        with pytest.raises(ValueError, match='No common prefix found between'):
            path.common_prefix('some/relative/path/to/elsewhere.ext')
        # Same checks with the argument built via the make_path fixture.
        path = Path(str('/some/absolute/path/to/somewhere.ext'))
        assert path.common_prefix(make_path('/some/absolute/path/to/elsewhere.ext')) == Path('/some/absolute/path/to')
        with pytest.raises(ValueError, match='No common prefix found between'):
            path.common_prefix(make_path('path/to'))
        with pytest.raises(ValueError, match='No common prefix found between'):
            path.common_prefix(make_path('some/relative/path/to/elsewhere.ext'))

    def test_anchor_to_path(self, make_path):
        """anchor_to_path(anchor) returns the suffix after the anchor; an
        unknown anchor raises ValueError."""
        path = Path(make_path('/some/absolute/path/to/somewhere.ext'))
        for i, e in enumerate(path[:-1]):
            assert path.anchor_to_path(e) == path[i + 1:]
        with pytest.raises(ValueError):
            path.anchor_to_path('non_existing_anchor')
        # With a repeated "path/to" segment the anchor may also be multi-part.
        path = Path(make_path('/some/absolute/path/to/path/to/somewhere.ext'))
        for i, e in enumerate(path[:-1]):
            assert path.anchor_to_path(path[:i + 1]) == path[i + 1:]
        assert path.anchor_to_path('path/to') == path[5:]

    def test_root_to_anchor(self, make_path):
        """root_to_anchor(anchor) returns the prefix before the anchor; an
        unknown anchor raises ValueError."""
        path = Path(make_path('/some/absolute/path/to/somewhere.ext'))
        for i, e in enumerate(path[1:]):
            assert path.root_to_anchor(e) == path[:i + 1]
        with pytest.raises(ValueError):
            path.root_to_anchor('non_existing_anchor')
        # With a repeated "path/to" segment the anchor may also be multi-part.
        path = Path(make_path('/some/absolute/path/to/path/to/somewhere.ext'))
        for i, e in enumerate(path[1:]):
            assert path.root_to_anchor(path[i + 1:]) == path[:i + 1]
        assert path.root_to_anchor('path/to') == path[:5]
class TestPathOS(object):
# BASE
# |
# |-- dirA/
# | |-- linkC -> "../dirB"
# |-- dirB/
# | |-- fileB
# | |-- linkD -> "../dirB"
# |-- dir.C/
# | |-- fileC
# | |-- fileD
# | |-- dirD/
# | | |-- fileD
# |-- fileA
# |-- linkA -> "fileA"
# |-- linkB -> "dirB"
#
@pytest.fixture(params=["pathlib", "Path", "str"])
def tmp_dir_structure(self, tmp_path, request):
    """Build the directory tree sketched in the comment above under tmp_path.

    Parametrized so the base path is handed to tests as a pathlib.Path,
    a project Path, or a plain str; everything is removed after the test.
    """
    base = str(tmp_path)
    os.mkdir(os.path.join(base, 'dirA'))
    os.mkdir(os.path.join(base, 'dirB'))
    os.mkdir(os.path.join(base, 'dir.C'))
    os.mkdir(os.path.join(base, 'dir.C', 'dirD'))
    with open(os.path.join(base, 'fileA'), 'wb') as f:
        f.write(b"this is file A\n")
    with open(os.path.join(base, 'dirB', 'fileB'), 'wb') as f:
        f.write(b"this is file B\n")
    with open(os.path.join(base, 'dir.C', 'fileC'), 'wb') as f:
        f.write(b"this is file C\n")
    with open(os.path.join(base, 'dir.C', 'dirD', 'fileD'), 'wb') as f:
        f.write(b"this is file D\n")
    def dirlink(src, dest):
        os.symlink(src, dest)
    # Relative symlinks
    os.symlink('fileA', os.path.join(base, 'linkA'))
    os.symlink('non-existing', os.path.join(base, 'brokenLink'))
    dirlink('dirB', os.path.join(base, 'linkB'))
    dirlink(os.path.join('..', 'dirB'), os.path.join(base, 'dirA', 'linkC'))
    # This one goes upwards but doesn't create a loop
    dirlink(os.path.join('..', 'dirB'), os.path.join(base, 'dirB', 'linkD'))
    if request.param == 'pathlib':
        yield pathlib.Path(str(base))
    elif request.param == 'Path':
        yield Path(str(base))
    else:
        yield str(base)
    # Teardown: remove links and files before their containing directories.
    os.remove(os.path.join(base, 'dirB', 'linkD'))
    os.remove(os.path.join(base, 'dirA', 'linkC'))
    os.remove(os.path.join(base, 'linkB'))
    os.remove(os.path.join(base, 'brokenLink'))
    os.remove(os.path.join(base, 'linkA'))
    os.remove(os.path.join(base, 'dir.C', 'dirD', 'fileD'))
    os.remove(os.path.join(base, 'dir.C', 'fileC'))
    os.remove(os.path.join(base, 'dirB', 'fileB'))
    os.remove(os.path.join(base, 'fileA'))
    os.rmdir(os.path.join(base, 'dir.C', 'dirD'))
    os.rmdir(os.path.join(base, 'dir.C'))
    os.rmdir(os.path.join(base, 'dirB'))
    os.rmdir(os.path.join(base, 'dirA'))
def test_filename(self, tmp_dir_structure):
    """filename/ext are read-only properties, empty for directories and
    broken links; joining onto a file path raises."""
    p = Path(tmp_dir_structure) / 'dir.C'
    assert p.filename == ''
    assert p.ext == ''
    # NOTE(review): CPython >= 3.11 changed this message to
    # "property ... has no setter"; the match may need widening there.
    with pytest.raises(AttributeError, match='can\'t set attribute'):
        p.filename = 'file'
    with pytest.raises(AttributeError, match='can\'t set attribute'):
        p.ext = 'ext'
    p = p / 'fileC'
    assert p.filename == 'fileC'
    assert p.ext == ''
    with pytest.raises(ValueError, match='It is impossible to left join a file-path:'):
        p = p / 'file'
    p = p[:-2] / 'brokenLink'
    assert p.filename == ''
    assert p.ext == ''
def test_glob_rglob(self, tmp_dir_structure):
    """glob/rglob results agree with pathlib for a range of patterns."""
    root = Path(tmp_dir_structure)
    ref = pathlib.Path(str(tmp_dir_structure))
    # rglob('*') is equivalent to glob('**/*') and to pathlib's rglob.
    assert {str(p) for p in root.rglob('*')} == {str(p) for p in root.glob('**/*')}
    assert {str(p) for p in root.rglob('*')} == {str(p) for p in ref.rglob('*')}
    patterns = ('dirB/*', '**', '**/**', '**/**/*', 'dirB/**/*', '**/dirD/*')
    for pattern in patterns:
        assert {str(p) for p in root.glob(pattern)} == {str(p) for p in ref.glob(pattern)}
def test_listdir(self, tmp_dir_structure):
    """listdir returns the same entries as os.listdir."""
    ours = {str(p) for p in Path(tmp_dir_structure).listdir()}
    reference = {str(p) for p in os.listdir(str(tmp_dir_structure))}
    assert ours == reference
def test_mkdir(self, tmp_dir_structure):
    """mkdir honors exist_ok/parents the same way pathlib.Path.mkdir does."""
    # Existing dir: error unless exist_ok.
    with pytest.raises(OSError):
        (Path(tmp_dir_structure) / 'dirA').mkdir()
    (Path(tmp_dir_structure) / 'dirA').mkdir(exist_ok=True)
    # Missing parent: error unless parents=True.
    with pytest.raises(OSError):
        (Path(tmp_dir_structure) / 'dir0' / 'dir1').mkdir()
    (Path(tmp_dir_structure) / 'dir0' / 'dir1').mkdir(parents=True)
    assert (Path(tmp_dir_structure) / 'dir0').is_dir()
    assert (Path(tmp_dir_structure) / 'dir0' / 'dir1').is_dir()
    # Re-creating with parents=True still errors without exist_ok.
    with pytest.raises(OSError):
        (Path(tmp_dir_structure) / 'dir0' / 'dir1').mkdir(parents=True)
    (Path(tmp_dir_structure) / 'dir0' / 'dir1').mkdir(parents=True, exist_ok=True)
    # Same behavior when the leaf looks like a file name.
    with pytest.raises(OSError):
        (Path(tmp_dir_structure) / 'dirA' / 'file.ext').mkdir()
    (Path(tmp_dir_structure) / 'dirA' / 'file.ext').mkdir(exist_ok=True)
    with pytest.raises(OSError):
        (Path(tmp_dir_structure) / 'dir2' / 'dir3' / 'file.ext').mkdir()
    (Path(tmp_dir_structure) / 'dir2' / 'dir3' / 'file.ext').mkdir(parents=True)
    assert (Path(tmp_dir_structure) / 'dir2').is_dir()
    assert (Path(tmp_dir_structure) / 'dir2' / 'dir3').is_dir()
    with pytest.raises(OSError):
        (Path(tmp_dir_structure) / 'dir2' / 'dir3' / 'file.ext').mkdir(parents=True)
    (Path(tmp_dir_structure) / 'dir2' / 'dir3' / 'file.ext').mkdir(parents=True, exist_ok=True)
def test_walk(self, tmp_dir_structure):
    """walk yields the same (root, dirnames, filenames) triples as os.walk."""
    def normalize(walker):
        # Hashable, order-insensitive representation of the walk output.
        return {(str(root), tuple(str(d) for d in dirs), tuple(str(f) for f in files))
                for root, dirs, files in walker}
    assert normalize(Path(tmp_dir_structure).walk()) == normalize(os.walk(str(tmp_dir_structure)))
def test_is_dir(self, tmp_dir_structure):
    """is_dir agrees with os.path.isdir."""
    candidate = Path(tmp_dir_structure) / 'dirA'
    assert candidate.is_dir() == os.path.isdir(os.path.join(str(tmp_dir_structure), 'dirA'))
def test_is_file(self, tmp_dir_structure):
    """is_file agrees with os.path.isfile."""
    candidate = Path(tmp_dir_structure) / 'fileA'
    assert candidate.is_file() == os.path.isfile(os.path.join(str(tmp_dir_structure), 'fileA'))
def test_is_symlink(self, tmp_dir_structure):
assert (Path(tmp_dir_structure) / | |
a memory-efficient manner. Array x is
stacked vertically n times.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory, so
modifying one value may change others.
Parameters
----------
x : 1D array or sequence
Array or sequence containing the data.
n : integer
The number of time to repeat the array.
axis : integer
The axis along which the data will run.
References
----------
`stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>`_
'''
if axis not in [0, 1]:
raise ValueError('axis must be 0 or 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1:
if axis == 0:
return np.atleast_2d(x)
else:
return np.atleast_2d(x).T
if n < 1:
raise ValueError('n cannot be less than 1')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. n. See #3845.
n = int(n)
if axis == 0:
shape = (n, x.size)
strides = (0, x.strides[0])
else:
shape = (x.size, n)
strides = (x.strides[0], 0)
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
                     window=None, noverlap=None, pad_to=None,
                     sides=None, scale_by_freq=None, mode=None):
    '''
    This is a helper function that implements the commonality between the
    psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums.
    It is *NOT* meant to be used outside of mlab and may change at any time.

    Returns ``(result, freqs, t)``: the unaveraged per-segment spectrum,
    the frequency axis, and the segment-center times.
    '''
    if y is None:
        # if y is None use x for y
        same_data = True
    else:
        # The checks for if y is x are so that we can use the same function to
        # implement the core of psd(), csd(), and spectrogram() without doing
        # extra calculations. We return the unaveraged Pxy, freqs, and t.
        same_data = y is x
    # Fill in documented defaults for every argument left as None.
    if Fs is None:
        Fs = 2
    if noverlap is None:
        noverlap = 0
    if detrend_func is None:
        detrend_func = detrend_none
    if window is None:
        window = window_hanning
    # if NFFT is set to None use the whole signal
    if NFFT is None:
        NFFT = 256
    if mode is None or mode == 'default':
        mode = 'psd'
    elif mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "'default', 'psd', 'complex', "
                         "'magnitude', 'angle', 'phase'" % mode)
    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is not 'psd'")
    # Make sure we're dealing with a numpy array. If y and x were the same
    # object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
    if sides is None or sides == 'default':
        # Real input defaults to a one-sided spectrum, complex to two-sided.
        if np.iscomplexobj(x):
            sides = 'twosided'
        else:
            sides = 'onesided'
    elif sides not in ['onesided', 'twosided']:
        raise ValueError("Unknown value for sides %s, must be one of: "
                         "'default', 'onesided', or 'twosided'" % sides)
    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = np.resize(x, NFFT)
        x[n:] = 0
    if not same_data and len(y) < NFFT:
        n = len(y)
        y = np.resize(y, NFFT)
        y[n:] = 0
    if pad_to is None:
        pad_to = NFFT
    if mode != 'psd':
        # Density scaling only makes sense for power spectral densities.
        scale_by_freq = False
    elif scale_by_freq is None:
        scale_by_freq = True
    # For real x, ignore the negative frequencies unless told otherwise
    if sides == 'twosided':
        numFreqs = pad_to
        if pad_to % 2:
            freqcenter = (pad_to - 1)//2 + 1
        else:
            freqcenter = pad_to//2
        scaling_factor = 1.
    elif sides == 'onesided':
        if pad_to % 2:
            numFreqs = (pad_to + 1)//2
        else:
            numFreqs = pad_to//2 + 1
        # One-sided spectra double the non-DC/non-Nyquist bins (applied
        # below) so total power is preserved.
        scaling_factor = 2.
    # Slice x into (possibly overlapping) segments as columns, detrend and
    # window each column, then FFT along axis 0.
    result = stride_windows(x, NFFT, noverlap, axis=0)
    result = detrend(result, detrend_func, axis=0)
    result, windowVals = apply_window(result, window, axis=0,
                                      return_window=True)
    result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
    freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
    if not same_data:
        # if same_data is False, mode must be 'psd'
        resultY = stride_windows(y, NFFT, noverlap)
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = apply_window(resultY, window, axis=0)
        resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
        result = np.conj(result) * resultY
    elif mode == 'psd':
        result = np.conj(result) * result
    elif mode == 'magnitude':
        result = np.abs(result) / np.abs(windowVals).sum()
    elif mode == 'angle' or mode == 'phase':
        # we unwrap the phase later to handle the onesided vs. twosided case
        result = np.angle(result)
    elif mode == 'complex':
        result /= np.abs(windowVals).sum()
    if mode == 'psd':
        # Also include scaling factors for one-sided densities and dividing by
        # the sampling frequency, if desired. Scale everything, except the DC
        # component and the NFFT/2 component:
        # if we have a even number of frequencies, don't scale NFFT/2
        if not NFFT % 2:
            slc = slice(1, -1, None)
        # if we have an odd number, just don't scale DC
        else:
            slc = slice(1, None, None)
        result[slc] *= scaling_factor
        # MATLAB divides by the sampling frequency so that density function
        # has units of dB/Hz and can be integrated by the plotted frequency
        # values. Perform the same scaling here.
        if scale_by_freq:
            result /= Fs
            # Scale the spectrum by the norm of the window to compensate for
            # windowing loss; see Bendat & Piersol Sec 11.5.2.
            result /= (np.abs(windowVals)**2).sum()
        else:
            # In this case, preserve power in the segment, not amplitude
            result /= np.abs(windowVals).sum()**2
    # Segment-center times, in units of 1/Fs.
    t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
    if sides == 'twosided':
        # center the frequency range at zero
        freqs = np.concatenate((freqs[freqcenter:], freqs[:freqcenter]))
        result = np.concatenate((result[freqcenter:, :],
                                 result[:freqcenter, :]), 0)
    elif not pad_to % 2:
        # get the last value correctly, it is negative otherwise
        freqs[-1] *= -1
    # we unwrap the phase here to handle the onesided vs. twosided case
    if mode == 'phase':
        result = np.unwrap(result, axis=0)
    return result, freqs, t
def _single_spectrum_helper(x, mode, Fs=None, window=None, pad_to=None,
                            sides=None):
    '''
    This is a helper function that implements the commonality between the
    complex, magnitude, angle, and phase spectrums.
    It is *NOT* meant to be used outside of mlab and may change at any time.
    '''
    # The psd/csd code path handles these modes; reject them here.
    if mode in (None, 'psd', 'default'):
        raise ValueError('_single_spectrum_helper does not work with %s mode'
                         % mode)
    # A single spectrum uses the whole signal as one segment: NFFT = len(x),
    # no overlap, no detrending, no density scaling.
    segment_len = len(x)
    spec, freqs, _ = _spectral_helper(
        x=x, y=None, NFFT=segment_len, Fs=Fs,
        detrend_func=detrend_none, window=window,
        noverlap=0,
        pad_to=segment_len if pad_to is None else pad_to,
        sides=sides, scale_by_freq=False, mode=mode)
    if mode != 'complex':
        spec = spec.real
    # Drop the trailing singleton segment axis so callers get a 1D spectrum.
    if spec.ndim == 2 and spec.shape[1] == 1:
        spec = spec[:, 0]
    return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(Spectral=inspect.cleandoc("""
Fs : scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
window : callable or ndarray
A function or a vector of length *NFFT*. To create window vectors see
`window_hanning`, `window_none`, `numpy.blackman`, `numpy.hamming`,
`numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. The
default is `window_hanning`. If a function is passed as the argument,
it must take a data segment as an argument and return the windowed
version of the segment.
sides : {'default', 'onesided', 'twosided'}
Specifies which sides of the spectrum to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided
spectrum, while 'twosided' forces two-sided.
"""))
docstring.interpd.update(Single_Spectrum=inspect.cleandoc("""
pad_to : int
The number of points to which the data segment is padded when
performing the FFT. While not increasing the actual resolution of
the spectrum (the minimum distance between resolvable peaks),
this can give more points in the plot, allowing for more
detail. This corresponds to the *n* parameter in the call to fft().
The default is None, which sets *pad_to* equal to the length of the
input signal (i.e. no padding).
"""))
docstring.interpd.update(PSD=inspect.cleandoc("""
pad_to : int
The number of points to which the data segment is padded when
performing the FFT. This can be different | |
== x_closest and G_2_1_array[i][1] == y_closest:
# add node and edge!
string_point = '{},{}'.format(x_closest, y_closest)
string_interp = '{},{}'.format(x_interp, y_interp)
G_2_1.add_node(string_interp)
G_2_1.add_edge(string_point, string_interp)
print("... point added to G_2_1")
return
if not bool_pos3:
G_2_3_array = graph_to_nodes(G_2_3)
for i in range(G_2_3_array.shape[0]):
if G_2_3_array[i][0] == x_closest and G_2_3_array[i][1] == y_closest:
# add node and edge!
string_point = '{},{}'.format(x_closest, y_closest)
string_interp = '{},{}'.format(x_interp, y_interp)
G_2_3.add_node(string_interp)
G_2_3.add_edge(string_point, string_interp)
print("... point added to G_i_1")
return
G_3_2_array = graph_to_nodes(G_3_2)
for i in range(G_3_2_array.shape[0]):
if G_3_2_array[i][0] == x_closest and G_3_2_array[i][1] == y_closest:
# add node and edge!
string_point = '{},{}'.format(x_closest, y_closest)
string_interp = '{},{}'.format(x_interp, y_interp)
G_3_2.add_node(string_interp)
G_3_2.add_edge(string_point, string_interp)
print("... point added to G_3_2")
return
if not bool_pos4:
G_3_4_array = graph_to_nodes(G_3_4)
for i in range(G_3_4_array.shape[0]):
if G_3_4_array[i][0] == x_closest and G_3_4_array[i][1] == y_closest:
# add node and edge!
string_point = '{},{}'.format(x_closest, y_closest)
string_interp = '{},{}'.format(x_interp, y_interp)
G_3_4.add_node(string_interp)
G_3_4.add_edge(string_point, string_interp)
print("... point added to G_3_4")
return
G_4_3_array = graph_to_nodes(G_4_3)
for i in range(G_4_3_array.shape[0]):
if G_4_3_array[i][0] == x_closest and G_4_3_array[i][1] == y_closest:
# add node and edge!
string_point = '{},{}'.format(x_closest, y_closest)
string_interp = '{},{}'.format(x_interp, y_interp)
G_4_3.add_node(string_interp)
G_4_3.add_edge(string_point, string_interp)
print("... point added to G_4_3")
return
# =========================================
# =================================================
# --- Simulation constants ---
# NOTE(review): units appear to follow the 0.1-per-cell map grid below —
# confirm against the robot model used elsewhere.
baseline = 0.45
time_step = 0.1
dx = dy = 0.1
# this r = MAP; which is 100X100
r = read_pgm( 'sim_map.pgm' )
r_formatted = r.copy()
print("rows:", len(r[0][:])) # note that Y=rows are flipped
print("cols:", len(r[:][0]))
# re-mapping to something more intuitive
# Flip the Y axis and binarise the map: 0.0 = obstacle, 1.0 = free space.
for row_index, row in enumerate(r): # row is for Y (inverted)
    for col_index, elem in enumerate(row): # col is for X
        if elem == 0: # this means there is an obstacle there
            r_formatted[99 - row_index][col_index] = 0.0
        else:
            r_formatted[99 - row_index][col_index] = 1.0
loop_count = 0.0
x = np.array([5*dx, 95*dy, 0.0]) # this is the starting pose
# store values (plot path later)
state_array_x = np.array([0.0])
state_array_y = np.array([0.0])
state_array_theta = np.array([0.0])
# ===========================================
# --- RRT tuning parameters ---
object_clearance_tol = 0.1  # min distance of a new node from any obstacle
node_clearance_tol = 0.1    # min distance of a new node from existing nodes
d_tree = 0.5                # max step length from the closest tree node
graph_connect_tol = 0.5     # max gap allowed when joining two trees
# as long as we have not yet formed paths....
bool_pos1 = False
bool_pos2 = False
bool_pos3 = False
bool_pos4 = False
# lets also set up some real time plotting
fig1 = plt.figure()
fig1.canvas.draw()
plt.show(block=False)
# now to set up the poses to vist on the ROUTE
# NOTE: the goal orientation angle (optional) is ignored
init_pos = np.array([0.5, 9.5])
pos1 = np.array([7.0, 1.5])
pos2 = np.array([9.0, 5.0])
pos3 = np.array([3.0, 9.5])
pos4 = np.array([0.5, 5.0])
# let us create and populate out graph structure(s)
# there will be many graph structures:
# 1) init pos to pos1
# 2) pos1 to init pos
# 3) pos1 to pos2
# 4) pos2 to pos1
# 5) pos2 to pos3
# 6) pos3 to pos2
# 7) pos3 to pos4
# 8) pos4 to pos3
print("============== NetworkX: creating graph structures ....")
# Each tree is seeded with its start pose; node labels are "x,y" strings
# and the 'pos' attribute mirrors the coordinates for plotting/lookup.
G_i_1 = nx.Graph()
string = '{},{}'.format(init_pos[0], init_pos[1])
G_i_1.add_node(string, pos=(init_pos[0], init_pos[1]))
# string = '{},{}'.format(init_pos[0], init_pos[0])
# G_i_1.add_node(string, pos=(init_pos[0], init_pos[0]))
# string = '{},{}'.format(init_pos[1], init_pos[1])
# G_i_1.add_node(string, pos=(init_pos[1], init_pos[1]))
G_1_i = nx.Graph()
string = '{},{}'.format(pos1[0], pos1[1])
G_1_i.add_node(string, pos=(pos1[0], pos1[1]))
G_1_2 = nx.Graph()
string = '{},{}'.format(pos1[0], pos1[1])
G_1_2.add_node(string, pos=(pos1[0], pos1[1]))
G_2_1 = nx.Graph()
string = '{},{}'.format(pos2[0], pos2[1])
G_2_1.add_node(string, pos=(pos2[0], pos2[1]))
G_2_3 = nx.Graph()
string = '{},{}'.format(pos2[0], pos2[1])
G_2_3.add_node(string, pos=(pos2[0], pos2[1]))
G_3_2 = nx.Graph()
string = '{},{}'.format(pos3[0], pos3[1])
G_3_2.add_node(string, pos=(pos3[0], pos3[1]))
G_3_4 = nx.Graph()
string = '{},{}'.format(pos3[0], pos3[1])
G_3_4.add_node(string, pos=(pos3[0], pos3[1]))
G_4_3 = nx.Graph()
string = '{},{}'.format(pos4[0], pos4[1])
G_4_3.add_node(string, pos=(pos4[0], pos4[1]))
# The technique used is single query for each path individually
# now solving our RRT paths in a loop
while loop_count <= 1000: # break in code included
print("Begin loop >>>>>>>>>>>>>>")
# first extract all nodes from graphs....
all_nodes_array = np.array([0.0, 0.0])
# if we found the path already no need to consider those graphs
if not bool_pos1:
nodes_array_1 = graph_to_nodes(G_i_1)
nodes_array_2 = graph_to_nodes(G_1_i)
all_nodes_array = np.vstack((all_nodes_array, nodes_array_1))
all_nodes_array = np.vstack((all_nodes_array, nodes_array_2))
if not bool_pos2:
nodes_array_3 = graph_to_nodes(G_1_2)
nodes_array_4 = graph_to_nodes(G_2_1)
all_nodes_array = np.vstack((all_nodes_array, nodes_array_3))
all_nodes_array = np.vstack((all_nodes_array, nodes_array_4))
if not bool_pos3:
nodes_array_5 = graph_to_nodes(G_2_3)
nodes_array_6 = graph_to_nodes(G_3_2)
all_nodes_array = np.vstack((all_nodes_array, nodes_array_5))
all_nodes_array = np.vstack((all_nodes_array, nodes_array_6))
if not bool_pos4:
nodes_array_7 = graph_to_nodes(G_3_4)
nodes_array_8 = graph_to_nodes(G_4_3)
all_nodes_array = np.vstack((all_nodes_array, nodes_array_7))
all_nodes_array = np.vstack((all_nodes_array, nodes_array_8))
all_nodes_array_final = all_nodes_array[1:]
print("All nodes: ", all_nodes_array_final)
print("shape of all nodes: ", all_nodes_array_final.shape)
# now add a random point and see if it collides
obj_distance = 0.0
node_distance = 1000.0
interp_distance = 1000.0
collision = True
# keep trying if random point isL
# 1) too close to another node
# 2) too close/ colliding with an object (point or path)
# after moving d_tree distance away from closest node on any tree
while obj_distance < object_clearance_tol or node_distance < node_clearance_tol or interp_distance > d_tree or collision:
print(">>>>>>> A new random point")
# as long as x_rand is close to colliding (<0.1 from obstacle), keep trying...
# rand() returns random numb. from 0 to 1
rand_x = round(abs(np.random.rand())*10.0, 2)
rand_y = round(abs(np.random.rand())*10.0, 2)
x_rand = np.array([rand_x, rand_y])
print("random point: ", x_rand)
node_distance = 1000.0
# now ensure that we are a distance 0.1 from all other nodes
closest_node_index = 0
print("all nodes array shape: ", all_nodes_array_final.shape)
for i in range(all_nodes_array_final.shape[0]): # solves the closest node
node_distance_temp = np.sqrt(np.square(x_rand[0]-all_nodes_array_final[i][0])+np.square(x_rand[1]-all_nodes_array_final[i][1]))
if node_distance_temp < node_distance:
node_distance = node_distance_temp
closest_node_index = i
print("random point distance from closest node: ", node_distance)
print("and this closest node has index: ", closest_node_index, " and coord's: ", (all_nodes_array_final[closest_node_index][0], all_nodes_array_final[closest_node_index][1]))
# if the node distance if greater than d_tree, interpolate between the points
# done by moving rand point progressively closer until dist <= d_tree !!
x_interp = rand_x
y_interp = rand_y
x_closest = all_nodes_array_final[closest_node_index][0]
y_closest = all_nodes_array_final[closest_node_index][1]
if x_interp == x_closest:
print("try another random point... (equal x values)")
continue # to avoid error when dividing by zero (just try another random point)
gradient = (y_interp - y_closest) / (x_interp - x_closest)
if abs(y_interp - y_closest) <= abs(x_interp - x_closest):
if x_interp > x_closest:
dx_grad = -0.01
dy_grad = dx_grad * gradient
else:
dx_grad = 0.01
dy_grad = dx_grad * gradient
else:
if y_interp > y_closest:
dy_grad = -0.01
dx_grad = dy_grad * (1 / gradient)
else:
dy_grad = 0.01
dx_grad = dy_grad * (1 / gradient)
interp_distance = np.sqrt(np.square(x_interp - all_nodes_array_final[closest_node_index][0])
+ np.square(y_interp - all_nodes_array_final[closest_node_index][1]))
print("the initial interpolated distance: ", interp_distance)
print("the interpolated point: ", (x_interp, y_interp))
while interp_distance > d_tree:
x_interp = x_interp + dx_grad
y_interp = y_interp + dy_grad
interp_distance = np.sqrt(np.square(x_interp - all_nodes_array_final[closest_node_index][0])
+ np.square(y_interp - all_nodes_array_final[closest_node_index][1]))
# get the final interpolated point
print("final interpolated point: ", (x_interp, y_interp))
print("distance from closest node: ", interp_distance)
# now we can FINALLY check for collision and either make the collision boolean False
# or keep it True (if there is a collision)
collision = bresenham_collisions_single(np.array([x_interp, y_interp, x_closest, y_closest]))
print("collision?: ", collision)
# and the distance from an object?
obj_distance, (x_dist, y_dist) = dist_obstacle(np.array([x_interp, y_interp]))
print("interp point distance (from object): ", obj_distance)
# ===============================================================
# The point is now ready to be added to the closest graph
# the point to be added is x_interp, y_interp
# the closest point on the graph is x_closest, y_closest
add_to_closest_graph(x_interp, y_interp, x_closest, y_closest,
G_i_1, G_1_i, G_1_2, G_2_1, G_2_3, G_3_2, G_3_4, G_4_3,
bool_pos1, bool_pos2, bool_pos3, bool_pos4)
# now we try to connect the graphs
# (if their nodes are less than a distance apart and there are no collisions)
# first find the closest points between the 2 graphs
graph_connect_tol = 0.5
# ===================== for path 1
if not bool_pos1:
print("Attempting to connect graphs for PATH 1 ...")
G_i_1_array = graph_to_nodes(G_i_1)
G_1_i_array = graph_to_nodes(G_1_i)
graph_dist = 1000.0 # just a temporary placeholder
# find closest points between graphs
for i in range(G_i_1_array.shape[0]):
for j in range(G_1_i_array.shape[0]):
graph_dist_temp = np.sqrt(np.square(G_i_1_array[i][0] - G_1_i_array[j][0]) + np.square(G_i_1_array[i][1] - G_1_i_array[j][1]))
if graph_dist_temp < graph_dist:
graph_dist = graph_dist_temp
point_1_x = G_i_1_array[i][0]
point_1_y = G_i_1_array[i][1]
point_2_x = G_1_i_array[j][0]
point_2_y = G_1_i_array[j][1]
print("distance btw graphs: ", graph_dist)
# connect the closest points if they are reasonably close and do not have collisions between them
if not bresenham_collisions_single(np.array([point_1_x, point_1_y, point_2_x, point_2_y])):
print("there is no collision between the points on either graph!")
if graph_dist < graph_connect_tol:
print(" and they are close enough to connect! dist: ", graph_dist)
string_1 = '{},{}'.format(point_1_x, point_1_y)
string_2 = '{},{}'.format(point_2_x, point_2_y)
G_i_1.add_edge(string_1, string_2)
# the path is found!
bool_pos1 = True
# and merge these graphs!
G_path_1 = nx.compose(G_i_1, G_1_i)
# | |
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class LogInspectionRulesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_log_inspection_rule(self, log_inspection_rule, api_version, **kwargs): # noqa: E501
"""Create a Log Inspection Rule # noqa: E501
Create a new log inspection rule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_log_inspection_rule(log_inspection_rule, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LogInspectionRule log_inspection_rule: The settings of the new log inspection rule. (required)
:param str api_version: The version of the api being called. (required)
:return: LogInspectionRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_log_inspection_rule_with_http_info(log_inspection_rule, api_version, **kwargs) # noqa: E501
else:
(data) = self.create_log_inspection_rule_with_http_info(log_inspection_rule, api_version, **kwargs) # noqa: E501
return data
def create_log_inspection_rule_with_http_info(self, log_inspection_rule, api_version, **kwargs):  # noqa: E501
    """Create a Log Inspection Rule  # noqa: E501

    Create a new log inspection rule.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_log_inspection_rule_with_http_info(log_inspection_rule, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param LogInspectionRule log_inspection_rule: The settings of the new log inspection rule. (required)
    :param str api_version: The version of the api being called. (required)
    :return: LogInspectionRule
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keywords accepted in addition to the positional parameters.
    all_params = ['log_inspection_rule', 'api_version']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument the endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_log_inspection_rule" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'log_inspection_rule' is set
    if ('log_inspection_rule' not in params or
            params['log_inspection_rule'] is None):
        raise ValueError("Missing the required parameter `log_inspection_rule` when calling `create_log_inspection_rule`")  # noqa: E501
    # verify the required parameter 'api_version' is set
    if ('api_version' not in params or
            params['api_version'] is None):
        raise ValueError("Missing the required parameter `api_version` when calling `create_log_inspection_rule`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # The requested API version travels as a request header.
    header_params = {}
    if 'api_version' in params:
        header_params['api-version'] = params['api_version']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The rule object is serialized as the JSON request body.
    body_params = None
    if 'log_inspection_rule' in params:
        body_params = params['log_inspection_rule']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['DefaultAuthentication']  # noqa: E501

    # Delegate the actual HTTP POST to the shared ApiClient.
    return self.api_client.call_api(
        '/loginspectionrules', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='LogInspectionRule',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_log_inspection_rule(self, log_inspection_rule_id, api_version, **kwargs): # noqa: E501
"""Delete a Log Inspection Rule # noqa: E501
Delete a log inspection rule by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_log_inspection_rule(log_inspection_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int log_inspection_rule_id: The ID number of the log inspection rule to delete. (required)
:param str api_version: The version of the api being called. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_log_inspection_rule_with_http_info(log_inspection_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.delete_log_inspection_rule_with_http_info(log_inspection_rule_id, api_version, **kwargs) # noqa: E501
return data
def delete_log_inspection_rule_with_http_info(self, log_inspection_rule_id, api_version, **kwargs):  # noqa: E501
    """Delete a Log Inspection Rule  # noqa: E501

    Delete a log inspection rule by ID.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_log_inspection_rule_with_http_info(log_inspection_rule_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int log_inspection_rule_id: The ID number of the log inspection rule to delete. (required)
    :param str api_version: The version of the api being called. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keywords accepted in addition to the positional parameters.
    all_params = ['log_inspection_rule_id', 'api_version']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument the endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_log_inspection_rule" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'log_inspection_rule_id' is set
    if ('log_inspection_rule_id' not in params or
            params['log_inspection_rule_id'] is None):
        raise ValueError("Missing the required parameter `log_inspection_rule_id` when calling `delete_log_inspection_rule`")  # noqa: E501
    # verify the required parameter 'api_version' is set
    if ('api_version' not in params or
            params['api_version'] is None):
        raise ValueError("Missing the required parameter `api_version` when calling `delete_log_inspection_rule`")  # noqa: E501

    # NOTE(review): this pattern is unanchored, so any string containing a
    # digit anywhere passes (e.g. 'abc1'); an anchored pattern or
    # re.fullmatch would validate the whole ID — confirm before changing.
    if 'log_inspection_rule_id' in params and not re.search('\\d+', str(params['log_inspection_rule_id'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `log_inspection_rule_id` when calling `delete_log_inspection_rule`, must conform to the pattern `/\\d+/`")  # noqa: E501
    collection_formats = {}

    # The rule ID is substituted into the URL path template below.
    path_params = {}
    if 'log_inspection_rule_id' in params:
        path_params['logInspectionRuleID'] = params['log_inspection_rule_id']  # noqa: E501

    query_params = []

    # The requested API version travels as a request header.
    header_params = {}
    if 'api_version' in params:
        header_params['api-version'] = params['api_version']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['DefaultAuthentication']  # noqa: E501

    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api(
        '/loginspectionrules/{logInspectionRuleID}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def describe_log_inspection_rule(self, log_inspection_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a Log Inspection Rule # noqa: E501
Describe a log inspection rule by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_log_inspection_rule(log_inspection_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int log_inspection_rule_id: The ID number of the log inspection rule to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: LogInspectionRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_log_inspection_rule_with_http_info(log_inspection_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_log_inspection_rule_with_http_info(log_inspection_rule_id, api_version, **kwargs) # noqa: E501
return data
def describe_log_inspection_rule_with_http_info(self, log_inspection_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a Log Inspection Rule # noqa: E501
Describe a log inspection rule by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_log_inspection_rule_with_http_info(log_inspection_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int log_inspection_rule_id: The ID number of the log inspection rule to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: LogInspectionRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['log_inspection_rule_id', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_log_inspection_rule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'log_inspection_rule_id' is set
if ('log_inspection_rule_id' not in params or
params['log_inspection_rule_id'] is None):
raise ValueError("Missing the required parameter `log_inspection_rule_id` when calling `describe_log_inspection_rule`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_log_inspection_rule`") # noqa: E501
if 'log_inspection_rule_id' in params and not re.search('\\d+', str(params['log_inspection_rule_id'])): # noqa: | |
- 2*y, 0,
-2*x*y + 1, 0, 6*x + y, 0, x + 2*y, -6*x - y, 0]
Exterior algebras inherit from Clifford algebras, so
supercommutators work as well. We verify the exterior algebra
is supercommutative::
sage: E.<x,y,z,w> = ExteriorAlgebra(QQ)
sage: all(b1.supercommutator(b2) == 0
....: for b1 in E.basis() for b2 in E.basis())
True
"""
P = self.parent()
ret = P.zero()
for ms,cs in self:
for mx,cx in x:
ret += P.term(ms, cs) * P.term(mx, cx)
s = (-1)**(P.degree_on_basis(ms) * P.degree_on_basis(mx))
ret -= s * P.term(mx, cx) * P.term(ms, cs)
return ret
class CliffordAlgebra(CombinatorialFreeModule):
r"""
The Clifford algebra of a quadratic form.
Let `Q : V \to \mathbf{k}` denote a quadratic form on a vector space `V`
over a field `\mathbf{k}`. The Clifford algebra `Cl(V, Q)` is defined as
`T(V) / I_Q` where `T(V)` is the tensor algebra of `V` and `I_Q` is the
two-sided ideal generated by all elements of the form `v \otimes v - Q(v)`
for all `v \in V`.
We abuse notation to denote the projection of a pure tensor
`x_1 \otimes x_2 \otimes \cdots \otimes x_m \in T(V)` onto
`T(V) / I_Q = Cl(V, Q)` by `x_1 \wedge x_2 \wedge \cdots \wedge x_m`.
This is motivated by the fact that `Cl(V, Q)` is the exterior algebra
`\wedge V` when `Q = 0` (one can also think of a Clifford algebra as
a quantization of the exterior algebra). See :class:`ExteriorAlgebra`
for the concept of an exterior algebra.
From the definition, a basis of `Cl(V, Q)` is given by monomials of
the form
.. MATH::
\{ e_{i_1} \wedge \cdots \wedge e_{i_k} \mid 1 \leq i_1 < \cdots <
i_k \leq n \},
where `n = \dim(V)` and where `\{ e_1, e_2, \cdots, e_n \}` is any
fixed basis of `V`. Hence
.. MATH::
\dim(Cl(V, Q)) = \sum_{k=0}^n \binom{n}{k} = 2^n.
.. NOTE::
The algebra `Cl(V, Q)` is a `\ZZ / 2\ZZ`-graded algebra, but not
(in general) `\ZZ`-graded (in a reasonable way).
This construction satisfies the following universal property. Let
`i : V \to Cl(V, Q)` denote the natural inclusion (which is an
embedding). Then for every associative `\mathbf{k}`-algebra `A`
and any `\mathbf{k}`-linear map `j : V \to A` satisfying
.. MATH::
j(v)^2 = Q(v) \cdot 1_A
for all `v \in V`, there exists a unique `\mathbf{k}`-algebra
homomorphism `f : Cl(V, Q) \to A` such that `f \circ i = j`.
This property determines the Clifford algebra uniquely up to
canonical isomorphism. The inclusion `i` is commonly used to
identify `V` with a vector subspace of `Cl(V)`.
The Clifford algebra `Cl(V, Q)` is a `\ZZ_2`-graded algebra
(where `\ZZ_2 = \ZZ / 2 \ZZ`); this grading is determined by
placing all elements of `V` in degree `1`. It is also an
`\NN`-filtered algebra, with the filtration too being defined
by placing all elements of `V` in degree `1`. The :meth:`degree` gives
the `\NN`-*filtration* degree, and to get the super degree use instead
:meth:`~sage.categories.super_modules.SuperModules.ElementMethods.is_even_odd`.
The Clifford algebra also can be considered as a covariant functor
from the category of vector spaces equipped with quadratic forms
to the category of algebras. In fact, if `(V, Q)` and `(W, R)`
are two vector spaces endowed with quadratic forms, and if
`g : W \to V` is a linear map preserving the quadratic form,
then we can define an algebra morphism
`Cl(g) : Cl(W, R) \to Cl(V, Q)` by requiring that it send every
`w \in W` to `g(w) \in V`. Since the quadratic form `R` on `W`
is uniquely determined by the quadratic form `Q` on `V` (due to
the assumption that `g` preserves the quadratic form), this fact
can be rewritten as follows: If `(V, Q)` is a vector space with a
quadratic form, and `W` is another vector space, and
`\phi : W \to V` is any linear map, then we obtain an algebra
morphism `Cl(\phi) : Cl(W, \phi(Q)) \to Cl(V, Q)` where
`\phi(Q) = \phi^T \cdot Q \cdot \phi` (we consider `\phi` as a
matrix) is the quadratic form `Q` pulled back to `W`. In fact, the
map `\phi` preserves the quadratic form because of
.. MATH::
\phi(Q)(x) = x^T \cdot \phi^T \cdot Q \cdot \phi \cdot x
= (\phi \cdot x)^T \cdot Q \cdot (\phi \cdot x) = Q(\phi(x)).
Hence we have `\phi(w)^2 = Q(\phi(w)) = \phi(Q)(w)` for all `w \in W`.
REFERENCES:
- :wikipedia:`Clifford_algebra`
INPUT:
- ``Q`` -- a quadratic form
- ``names`` -- (default: ``'e'``) the generator names
EXAMPLES:
To create a Clifford algebra, all one needs to do is specify a
quadratic form::
sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
sage: Cl = CliffordAlgebra(Q)
sage: Cl
The Clifford algebra of the Quadratic form in 3 variables
over Integer Ring with coefficients:
[ 1 2 3 ]
[ * 4 5 ]
[ * * 6 ]
We can also explicitly name the generators. In this example, the
Clifford algebra we construct is an exterior algebra (since we
choose the quadratic form to be zero)::
sage: Q = QuadraticForm(ZZ, 4, [0]*10)
sage: Cl.<a,b,c,d> = CliffordAlgebra(Q)
sage: a*d
a*d
sage: d*c*b*a + a + 4*b*c
a*b*c*d + 4*b*c + a
"""
@staticmethod
def __classcall_private__(cls, Q, names=None):
    """
    Normalize the input so equal inputs give the identical parent.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl1.<e0,e1,e2> = CliffordAlgebra(Q)
        sage: Cl2 = CliffordAlgebra(Q)
        sage: Cl3 = CliffordAlgebra(Q, ['e0','e1','e2'])
        sage: Cl1 is Cl2 and Cl2 is Cl3
        True
    """
    if not isinstance(Q, QuadraticForm):
        raise ValueError("{} is not a quadratic form".format(Q))
    # Default generator name is 'e'; a single name is expanded to one
    # indexed name per generator (e.g. 'e' -> e0, e1, ...).
    names = tuple(names if names is not None else 'e')
    if len(names) != Q.dim():
        if len(names) != 1:
            raise ValueError("the number of variables does not match the number of generators")
        names = tuple('{}{}'.format(names[0], k) for k in range(Q.dim()))
    return super(CliffordAlgebra, cls).__classcall__(cls, Q, names)
def __init__(self, Q, names, category=None):
    r"""
    Initialize ``self``.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl = CliffordAlgebra(Q)
        sage: Cl.category()
        Category of finite dimensional super algebras with basis over
         (euclidean domains and infinite enumerated sets and metric spaces)
        sage: TestSuite(Cl).run()

    TESTS:

    We check that the basis elements are indeed indexed by
    *strictly increasing* tuples::

        sage: Q = QuadraticForm(ZZ, 9)
        sage: Cl = CliffordAlgebra(Q)
        sage: ba = Cl.basis().keys()
        sage: all( tuple(sorted(S)) in ba
        ....:     for S in Subsets(range(9)) )
        True
    """
    self._quadratic_form = Q
    base_ring = Q.base_ring()
    # Basis keys: sorted subsets of {0, ..., dim-1}, i.e. strictly
    # increasing tuples of generator indices.
    indices = SubsetsSorted(range(Q.dim()))
    category = AlgebrasWithBasis(base_ring.category()).Super().Filtered().FiniteDimensional().or_subcategory(category)
    CombinatorialFreeModule.__init__(self, base_ring, indices, category=category)
    self._assign_names(names)
def _repr_(self):
    r"""
    Return a string representation of ``self``.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: CliffordAlgebra(Q)
        The Clifford algebra of the Quadratic form in 3 variables
        over Integer Ring with coefficients:
        [ 1 2 3 ]
        [ * 4 5 ]
        [ * * 6 ]
    """
    # Delegate the interesting part to the quadratic form's own repr.
    form = self._quadratic_form
    return "The Clifford algebra of the {}".format(form)
def _repr_term(self, m):
    """
    Return a string representation of the basis element indexed by ``m``.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: Cl._repr_term((0,2))
        'x*z'
        sage: Cl._repr_term(())
        '1'
        sage: Cl._repr_term((1,))
        'y'
    """
    # The empty index tuple is the multiplicative identity.
    if not m:
        return '1'
    names = self.variable_names()
    return '*'.join(names[i] for i in m)
def _latex_term(self, m):
    r"""
    Return a `\LaTeX` representation of the basis element indexed
    by ``m``.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: Cl._latex_term((0,2))
        ' x z'
    """
    if not m:
        return '1'
    # Each factor is preceded by a space (note the leading space in the
    # doctest output above).
    names = self.latex_variable_names()
    return ''.join(' ' + names[i] for i in m)
def _coerce_map_from_(self, V):
"""
Return if there is a coerce map from ``V`` into ``self``.
The things which coerce into ``self`` are:
- Clifford algebras with the same generator names and an equal
quadratic form over a ring which coerces into the base
ring of ``self``.
- The underlying free module of ``self``.
- The base ring of ``self``.
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
sage: Qp = QuadraticForm(QQ, 3, [1,2,3,4,5,6])
sage: Cl | |
<filename>my_models/resnet.py
# -*- coding:utf-8 -*-
# ResNet模型代码
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
# ResNet uses BatchNorm after each convolution for numerical stability.
# Conv + BatchNorm (+ optional activation) building block.
class ConvBNLayer(paddle.nn.Layer):
    """2-D convolution followed by batch normalization and an optional activation.

    Args:
        num_channels: input channel count of the convolution.
        num_filters: output channel count of the convolution.
        filter_size: kernel size.
        stride: convolution stride (default 1).
        groups: group count for grouped convolution; 1 means an ordinary conv.
        act: activation name -- None, 'relu' or 'leaky'.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None):
        super(ConvBNLayer, self).__init__()
        # (filter_size - 1) // 2 keeps the spatial size for odd kernels
        # ('same'-style padding). Bias is omitted since BatchNorm follows.
        self._conv = nn.Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            bias_attr=False)
        self._batch_norm = paddle.nn.BatchNorm2D(num_filters)
        self.act = act

    def forward(self, inputs):
        out = self._conv(inputs)
        out = self._batch_norm(out)
        if self.act == 'leaky':
            return F.leaky_relu(x=out, negative_slope=0.1)
        if self.act == 'relu':
            return F.relu(x=out)
        return out
# Residual bottleneck block: three convolutions plus a skip connection.
# When the third conv's output shape differs from the input, a 1x1 conv
# reshapes the input so the two can be added.
class BottleneckBlock(paddle.nn.Layer):
    """ResNet bottleneck: 1x1 -> 3x3 -> 1x1 (channels x4) with a skip path.

    Args:
        num_channels: channels of the block input.
        num_filters: base channel count; the block outputs ``num_filters * 4``.
        stride: stride applied in the middle 3x3 convolution.
        shortcut: True when the input already matches the output shape and
            can be added directly; False inserts a 1x1 projection conv.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True):
        super(BottleneckBlock, self).__init__()
        # 1x1 reduction conv.
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu')
        # 3x3 conv; carries the block's stride.
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu')
        # 1x1 expansion conv (no activation before the residual add).
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None)
        if not shortcut:
            # Projection path so the skip matches conv2's output shape.
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=stride)
        self.shortcut = shortcut
        self._num_channels_out = num_filters * 4

    def forward(self, inputs):
        residual = self.conv2(self.conv1(self.conv0(inputs)))
        skip = inputs if self.shortcut else self.short(inputs)
        return F.relu(paddle.add(x=skip, y=residual))
# ResNet model definition.
class ResNet(paddle.nn.Layer):
    """ResNet backbone (50/101/152) with a linear classification head.

    Args:
        layers: network depth; must be one of 50, 101 or 152.
        num_classes: size of the final fully connected output.
            (The original docstring called this ``class_dim``; the actual
            parameter name is ``num_classes`` -- fixed here.)
    """

    def __init__(self, layers=50, num_classes=1):
        super(ResNet, self).__init__()
        # Hoisted from mid-method: stdlib import used for the fc initializer.
        import math

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        # Residual block counts for stages c2..c5 at each supported depth.
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        # Base output channels of the bottleneck convs per stage.
        num_filters = [64, 128, 256, 512]
        # Stem: 7x7 stride-2 conv followed by a 3x3 stride-2 max pool.
        self.conv = ConvBNLayer(
            num_channels=3,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu')
        self.pool2d_max = nn.MaxPool2D(
            kernel_size=3,
            stride=2,
            padding=1)
        # Stages c2..c5, each a chain of bottleneck blocks.
        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                # The first block of c3/c4/c5 downsamples with stride 2;
                # every other block keeps stride 1.
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        # Global average pooling reduces c5's output to [B, 2048, 1, 1].
        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(output_size=1)
        # Uniform fan-in initialization bound for the fully connected layer;
        # its input dimension is 2048 after pooling and flattening.
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.out = nn.Linear(in_features=2048, out_features=num_classes,
                             weight_attr=paddle.ParamAttr(
                                 initializer=paddle.nn.initializer.Uniform(-stdv, stdv)))

    def forward(self, inputs, label=None):
        """Return logits; with ``label`` supplied, also return batch accuracy."""
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = paddle.flatten(y, 1, -1)
        y = self.out(y)
        if label is not None:
            acc = paddle.metric.accuracy(input=y, label=label)
            return y, acc
        else:
            return y
# Print / visualize the model.
from paddle.vision.models import resnet50, vgg16, LeNet, mobilenet_v2
from paddle.static import InputSpec
# BUG FIX: the input spec was the flattened shape [None, 3*224*224], but the
# network's stem conv (and the summary call below) expects NCHW
# [None, 3, 224, 224]. The label spec also reused the name 'x', colliding
# with the input spec's name; it is now 'label'.
inputs = InputSpec([None, 3, 224, 224], 'float32', 'x')
labels = InputSpec([None, 10], 'float32', 'label')
model = paddle.Model(ResNet(num_classes=10), inputs, labels)
# model = paddle.Model(mobilenet_v2(num_classes=10), inputs, labels)
# Model visualization.
model.summary((-1, 3, 224, 224))
# ResNet50
# -------------------------------------------------------------------------------
# Layer (type) Input Shape Output Shape Param #
# ===============================================================================
# Conv2D-1 [[1, 3, 224, 224]] [1, 64, 112, 112] 9,408
# BatchNorm2D-1 [[1, 64, 112, 112]] [1, 64, 112, 112] 256
# ConvBNLayer-1 [[1, 3, 224, 224]] [1, 64, 112, 112] 0
# MaxPool2D-1 [[1, 64, 112, 112]] [1, 64, 56, 56] 0
# Conv2D-2 [[1, 64, 56, 56]] [1, 64, 56, 56] 4,096
# BatchNorm2D-2 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
# ConvBNLayer-2 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
# Conv2D-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,864
# BatchNorm2D-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
# ConvBNLayer-3 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
# Conv2D-4 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
# BatchNorm2D-4 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
# ConvBNLayer-4 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
# Conv2D-5 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
# BatchNorm2D-5 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
# ConvBNLayer-5 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
# BottleneckBlock-1 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
# Conv2D-6 [[1, 256, 56, 56]] [1, 64, 56, 56] 16,384
# BatchNorm2D-6 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
# ConvBNLayer-6 [[1, 256, 56, 56]] [1, 64, 56, 56] 0
# Conv2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,864
# BatchNorm2D-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
# ConvBNLayer-7 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
# Conv2D-8 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
# BatchNorm2D-8 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
# ConvBNLayer-8 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
# BottleneckBlock-2 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
# Conv2D-9 [[1, 256, 56, 56]] [1, 64, 56, 56] 16,384
# BatchNorm2D-9 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
# ConvBNLayer-9 [[1, 256, 56, 56]] [1, 64, 56, 56] 0
# Conv2D-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 36,864
# BatchNorm2D-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 256
# ConvBNLayer-10 [[1, 64, 56, 56]] [1, 64, 56, 56] 0
# Conv2D-11 [[1, 64, 56, 56]] [1, 256, 56, 56] 16,384
# BatchNorm2D-11 [[1, 256, 56, 56]] [1, 256, 56, 56] 1,024
# ConvBNLayer-11 [[1, 64, 56, 56]] [1, 256, 56, 56] 0
# BottleneckBlock-3 [[1, 256, 56, 56]] [1, 256, 56, 56] 0
# Conv2D-12 [[1, 256, 56, 56]] [1, 128, 56, 56] 32,768
# BatchNorm2D-12 [[1, 128, 56, 56]] [1, 128, 56, 56] 512
# ConvBNLayer-12 [[1, 256, 56, 56]] [1, 128, 56, 56] 0
# Conv2D-13 [[1, 128, 56, 56]] [1, 128, 28, 28] 147,456
# BatchNorm2D-13 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-13 [[1, 128, 56, 56]] [1, 128, 28, 28] 0
# Conv2D-14 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
# BatchNorm2D-14 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
# ConvBNLayer-14 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
# Conv2D-15 [[1, 256, 56, 56]] [1, 512, 28, 28] 131,072
# BatchNorm2D-15 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
# ConvBNLayer-15 [[1, 256, 56, 56]] [1, 512, 28, 28] 0
# BottleneckBlock-4 [[1, 256, 56, 56]] [1, 512, 28, 28] 0
# Conv2D-16 [[1, 512, 28, 28]] [1, 128, 28, 28] 65,536
# BatchNorm2D-16 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-16 [[1, 512, 28, 28]] [1, 128, 28, 28] 0
# Conv2D-17 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
# BatchNorm2D-17 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-17 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
# Conv2D-18 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
# BatchNorm2D-18 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
# ConvBNLayer-18 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
# BottleneckBlock-5 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
# Conv2D-19 [[1, 512, 28, 28]] [1, 128, 28, 28] 65,536
# BatchNorm2D-19 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-19 [[1, 512, 28, 28]] [1, 128, 28, 28] 0
# Conv2D-20 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
# BatchNorm2D-20 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-20 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
# Conv2D-21 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
# BatchNorm2D-21 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
# ConvBNLayer-21 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
# BottleneckBlock-6 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
# Conv2D-22 [[1, 512, 28, 28]] [1, 128, 28, 28] 65,536
# BatchNorm2D-22 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-22 [[1, 512, 28, 28]] [1, 128, 28, 28] 0
# Conv2D-23 [[1, 128, 28, 28]] [1, 128, 28, 28] 147,456
# BatchNorm2D-23 [[1, 128, 28, 28]] [1, 128, 28, 28] 512
# ConvBNLayer-23 [[1, 128, 28, 28]] [1, 128, 28, 28] 0
# Conv2D-24 [[1, 128, 28, 28]] [1, 512, 28, 28] 65,536
# BatchNorm2D-24 [[1, 512, 28, 28]] [1, 512, 28, 28] 2,048
# ConvBNLayer-24 [[1, 128, 28, 28]] [1, 512, 28, 28] 0
# BottleneckBlock-7 [[1, 512, 28, 28]] [1, 512, 28, 28] 0
# Conv2D-25 [[1, 512, 28, 28]] [1, 256, 28, 28] 131,072
# BatchNorm2D-25 [[1, 256, 28, 28]] [1, 256, 28, 28] 1,024
# ConvBNLayer-25 [[1, | |
import model.model as model
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
import scipy
import math
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash_table.Format import Sign
from pandas import DataFrame as df
from collections import OrderedDict
from plotly.colors import n_colors
import os
import json
######################### CHANGE THESE PARAMETERS #############################
# How many times the full tournament is simulated.
number_simulations = 500
# Brackets pulled from the database vs. randomly generated filler brackets.
real_entries = 10
fake_entries = 50
# Total pool size analyzed by the app.
number_entries = real_entries + fake_entries
year = 2021
# "mens" or "womens"; also used to build the page title below.
gender = "mens"
# Scoring systems currently implemented are "ESPN", "wins_only", "degen_bracket"
scoring_system = "ESPN"
# Dash application setup; `server` is exposed for WSGI deployment.
external_stylesheets = ['../assets/styles.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title='March Madness Simulator'
# Helper function
# TODO There may be a more effective way of doing this in pandas
def get_array_from_dataframe(frame, array_type, data_type):
    """Return the ``array_type`` cell of the first row whose 'name' column equals ``data_type``."""
    matching_rows = frame[frame['name'] == data_type]
    return matching_rows[array_type].values[0]
def count_occurrences(data):
    """Return an OrderedDict mapping each distinct value in ``data`` to the
    fraction of entries equal to it, keys in ascending order.

    BUG FIX: the original computed ``1/len(data)`` unconditionally and so
    raised ZeroDivisionError on empty input; an empty input now returns an
    empty OrderedDict.
    """
    if not data:
        return OrderedDict()
    increment = 1 / len(data)
    shares = {}
    for value in data:
        # Accumulate the per-element share rather than a raw count, matching
        # the original behavior (values are fractions that sum to ~1).
        shares[value] = shares.get(value, 0) + increment
    return OrderedDict(sorted(shares.items()))
# Ranks graph function
def prepare_ranks_graph(results):
    """Build a histogram (plotly distplot) of final placements per entry.

    ``results`` is a dataframe with 'name' and 'ranks' columns; one
    histogram trace is drawn per entry. If the distplot cannot be built,
    the Dash callback is aborted via PreventUpdate.
    """
    group_labels = [result for result in results['name']]
    array_results = [get_array_from_dataframe(results, 'ranks', result)
                     for result in group_labels]
    try:
        figure = ff.create_distplot(array_results, group_labels, show_rug=False,
                                    show_curve=False, show_hist=True, bin_size=1,
                                    histnorm='probability')
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed. create_distplot raises on
        # degenerate (singular-matrix) input data.
        print('Singular matrix error')
        raise PreventUpdate
    figure.update_layout(
        title_text='Histogram of Final Placements',
        xaxis_title='Placing',
        yaxis_title='Share of Simulations'
    )
    return figure
# Scores graph function
def prepare_scores_graph(results):
    """Build a histogram (plotly distplot) of final scores per entry.

    ``results`` is a dataframe with 'name' and 'simulations' columns; one
    histogram trace is drawn per entry.

    Cleanup: removed several large blocks of commented-out experimental
    code (scatter/violin alternatives) that obscured the live logic.
    """
    group_labels = [result for result in results['name']]
    array_results = [get_array_from_dataframe(results, 'simulations', result)
                     for result in group_labels]
    figure = ff.create_distplot(array_results, group_labels, show_rug=False,
                                show_curve=False, show_hist=True, bin_size=10,
                                histnorm='probability')
    figure.update_layout(
        title_text='Histogram of Final Scores',
        xaxis_title='Score',
        yaxis_title='Share of Simulations'
    )
    return figure
# Table preparation function
def prepare_table(entry_results, special_results, sims):
    """Build the list of row dicts backing the Dash scoring DataTable.

    ``entry_results`` and ``special_results`` are dataframes with 'name',
    'entryID' and 'placings' columns; ``sims`` is the number of simulations,
    used to normalize placement counts into shares of simulations.
    """
    def get_sub_placings(data_set, place, inclusive=False, percentile=False, average=False):
        # Share of simulations in which this entry finished exactly at
        # ``place`` (default), at or above it (inclusive/percentile), or the
        # mean placing (average). ``data_set`` must be sorted ascending --
        # the caller sorts it -- which is what makes the early ``break`` valid.
        i=0
        if average:
            return round(np.average(data_set),1)
        if percentile:
            # Convert a percentile (e.g. 25) into an absolute placing cutoff.
            # NOTE(review): this closes over ``entry_results`` (the real
            # entries only), not the full pool including specials -- confirm
            # that is the intended denominator.
            place = math.ceil(place/100*(len(entry_results)))
        for score in data_set:
            if score>place:
                break
            if percentile and score<=place:
                i+=1
            elif inclusive and score<=place:
                i+=1
            elif score==place:
                i+=1
        return round(i/sims, 3)
    def convert_entry_to_dictionary(dataframe, name):
        # One table row for the entry called ``name``: win shares, quartile
        # shares and average placing.
        ranks = get_array_from_dataframe(dataframe, 'placings', name)
        # Sorted in place; get_sub_placings relies on ascending order.
        ranks.sort()
        index = dataframe[dataframe['name'] == name]['entryID'].values[0]
        # Cumulative shares at the 25th/50th/75th percentiles, capped at 1.
        percentiles = [get_sub_placings(ranks, 25, percentile=True),
                       get_sub_placings(ranks, 50, percentile=True),
                       get_sub_placings(ranks, 75, percentile=True),
                       # get_sub_placings(ranks, 80, percentile=True),
                       1]
        entry = {
            'Index': index,
            'Entry': name,
            '1st': get_sub_placings(ranks, 1),
            '2nd': get_sub_placings(ranks, 2),
            # '3rd': get_sub_placings(ranks, 3),
            # 'Top Five': get_sub_placings(ranks, 5, inclusive=True),
            # 'Top Ten': get_sub_placings(ranks, 10, inclusive=True),
            # Quartile columns are successive differences of the cumulative
            # shares, so the four columns sum to (approximately) 1.
            '1st Q.': percentiles[0],
            '2nd Q.': percentiles[1]-percentiles[0],
            '3rd Q.': percentiles[2]-percentiles[1],
            '4th Q.': percentiles[3]-percentiles[2],
            # '5th Q.': percentiles[4]-percentiles[3],
            'Avg Plc.': get_sub_placings(ranks, 0, average=True),
        }
        return entry
    # Get rankings and then sort them
    data_array = []
    # The three algorithmically generated entries come first, then every
    # real entry in ``entry_results``.
    data_array.append(convert_entry_to_dictionary(special_results, 'most_valuable_teams'))
    data_array.append(convert_entry_to_dictionary(special_results, 'most_popular_teams'))
    data_array.append(convert_entry_to_dictionary(special_results, 'chalk'))
    for entry in entry_results['name']:
        data_array.append(convert_entry_to_dictionary(entry_results, entry))
    print("updating table viz")
    return data_array
# Raising the maximum here is safe, but requesting more entries than are
# currently listed forces a full re-rank of every entry, which is slow and is
# therefore capped for the hosted version of this app to prevent timeouts.
# The cap can be lifted when running locally.
def prepare_number_entries_input():
    """Return the bounded numeric input selecting how many entries to analyze."""
    return dcc.Input(
        id='number-entries-input',
        type='number',
        value=number_entries,
        max=number_entries,
        min=0
    )
# Unlike the number of entries, the number of simulations can never exceed the
# count originally run; adding simulations means restarting from scratch with
# a larger number.
def prepare_number_simulations_input():
    """Return the bounded numeric input selecting how many simulations to use."""
    return dcc.Input(
        id='number-simulations-input',
        type='number',
        value=number_simulations,
        max=number_simulations,
        min=0
    )
def prepare_run_button_input():
    """Return the button that triggers the subgroup-analysis callback."""
    return html.Button(id='run-input', n_clicks=0, children='Run Subgroup Analysis')
# Callback to update once results change
@app.callback(
    [Output(component_id='scoring-table', component_property='data'),
     Output(component_id='scoring-table', component_property='selected_rows'),
     Output('hidden-dataframe', 'children')],
    [Input(component_id='run-input', component_property='n_clicks')],
    [State('number-entries-input', 'value'),
     State('number-simulations-input', 'value')])
def update_table(n_clicks, entry_input, simulations_input):
    """Re-rank the requested subgroup of entries/simulations and refresh the table.

    Pulls additional entries from the database when the requested subgroup
    is larger than what has been ranked so far, then analyzes the sublist
    and returns (table rows, default row selection, dataframe as JSON for
    the hidden store).
    """
    global all_results
    # The last four rows of all_results are the special (generated) entries.
    current_number_of_entries = len(all_results['entryID'])-4
    if current_number_of_entries < entry_input:
        m.add_bulk_entries_from_database(entry_input-current_number_of_entries)
        m.add_simulation_results_postprocessing()
        all_results = m.output_results()
    # CLEANUP: the original assigned unused locals here (special_wins,
    # special_results, entry_results); nothing read them, the slices had no
    # side effects, and the module-level globals of the same names were not
    # updated (no ``global`` declaration), so they are removed.
    # NOTE(review): m.get_special_wins() was also called and discarded; it is
    # used as a plain getter at module scope -- confirm it has no required
    # side effects before relying on this removal.
    filtered_dataframe = m.analyze_sublist(all_results, entry_input, simulations_input)
    filtered_special_results = filtered_dataframe[-4:]
    filtered_entry_results = filtered_dataframe[:-4]
    scoring_table = prepare_table(filtered_entry_results, filtered_special_results, simulations_input)
    print("update complete")
    return scoring_table, [0, 1], filtered_dataframe.to_json(orient='split')
# Create each individual region
def create_region(region, stages, initial_game_number):
    """Return the HTML container for one region of the bracket.

    ``stages`` maps a stage class name (e.g. 'n64') to how many games that
    stage has within a region; game div ids are numbered sequentially
    starting at ``initial_game_number``.
    """
    game_number = initial_game_number
    stage_divs = []
    for stage, game_count in stages.items():
        games = []
        for slot in range(game_count):
            game_id = 'game' + str(game_number)
            games.append(html.Div([
                html.Div('', id=game_id + '-team1', className='team team1'),
                html.Div('', id=game_id + '-team2', className='team team2'),
            ], id=game_id, className=region + ' ' + stage + ' g' + str(slot) + ' game'))
            game_number += 1
        stage_divs.append(html.Div(games, className='inner-bounding ' + stage))
    return html.Div(stage_divs, className='region-container bounding-' + region)
# Create the outline of the bracket used for visualizations
def create_bracket():
    """Assemble the full bracket skeleton as nested Dash html.Div components.

    Game ids are numbered globally: each region holds 15 games
    (8 + 4 + 2 + 1 per the ``stages`` dict), so regions r1-r4 take games
    0-14, 15-29, 30-44 and 45-59; games 60 and 61 are the Final Four and
    game 62 is the championship game.
    """
    # Dictionary of each of the stages associated with the given region and the
    # number of games per region for that stage
    stages = {
        'n64' : 8,
        'n32' : 4,
        'n16' : 2,
        'n8' : 1
    }
    bounding_html_list = []
    left_region_html_list = []
    left_region_html_list.append(create_region('r1', stages, 0))
    left_region_html_list.append(create_region('r2', stages, 15))
    right_region_html_list = []
    right_region_html_list.append(create_region('r3', stages, 30))
    right_region_html_list.append(create_region('r4', stages, 45))
    bounding_html_list.append(
        html.Div(left_region_html_list, className='left-bounding')
    )
    # Final Four game fed by the left half of the bracket (game 60).
    bounding_html_list.append(
        html.Div([html.Div([
            html.Div('', id='game60-team1', className='team team1'),
            html.Div('', id='game60-team2', className='team team2'),
        ], className='n4 g1')], id='game60', className='final-four-bounding inner-bounding game')
    )
    # Championship game (game 62) sits between the two Final Four games.
    bounding_html_list.append(
        html.Div([html.Div([
            html.Div('', id='game62-team1', className='team team1'),
            html.Div('', id='game62-team2', className='team team2'),
        ], className='n2 g1')], id='game62', className='finals-bounding inner-bounding game')
    )
    # Final Four game fed by the right half of the bracket (game 61).
    bounding_html_list.append(
        html.Div([html.Div([
            html.Div('', id='game61-team1', className='team team1'),
            html.Div('', id='game61-team2', className='team team2'),
        ], className='n4 g2')], id='game61', className='final-four-bounding inner-bounding game')
    )
    bounding_html_list.append(
        html.Div(right_region_html_list, className='right-bounding')
    )
    bracket_html = html.Div(bounding_html_list, className='bounding-bracket')
    return bracket_html
###############################################################################
################################ Global code ##################################
###############################################################################
# Build the simulation model and pre-compute the results shared by every
# callback in the app.
m = model.Model(number_simulations=number_simulations, gender=gender, scoring_sys=scoring_system, year=year)
m.batch_simulate()
print("sims done")
m.create_json_files()
m.update_entry_picks()
m.initialize_special_entries()
m.analyze_special_entries()
m.add_fake_entries(fake_entries)
m.add_bulk_entries_from_database(real_entries)
m.add_simulation_results_postprocessing()
m.raw_print()
# BUG FIX: ``all_results = m.output_results()`` was called twice back to
# back; the duplicate is removed. (output_results is used elsewhere in this
# file as a plain getter -- confirm it is side-effect free.)
all_results = m.output_results()
special_wins = m.get_special_wins()
# The last four rows of all_results are the generated "special" entries.
special_results = all_results[-4:]
entry_results = all_results[:-4]
# Column groupings for the Dash DataTable definition below.
table_columns_pre=['Entry']
table_columns_places=['1st', '2nd']
table_columns_quintiles=['1st Q.', '2nd Q.', '3rd Q.', '4th Q.']
table_columns_post=['Avg Plc.']
###############################################################################
################################ Global code ##################################
###############################################################################
def discrete_background_color_bins(df, n_bins=9, columns='all', dark_color='Blues'):
    """Return Dash DataTable conditional styles shading numeric cells.

    Cells are colored by which of ``n_bins`` equal-width bins in [0, 1]
    their value falls into, using the named colorlover sequential scale.
    The value range is fixed at [0, 1] rather than derived from the data,
    since the table's numeric columns hold shares of simulations.
    """
    import colorlover
    bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
    if columns == 'all':
        if 'id' in df:
            # Don't shade the synthetic 'id' column DataTable may carry.
            df_numeric_columns = df.select_dtypes('number').drop(['id'], axis=1)
        else:
            df_numeric_columns = df.select_dtypes('number')
    else:
        df_numeric_columns = df[columns]
    # Fixed range: values are shares in [0, 1], not raw scores.
    df_max = 1
    df_min = 0
    ranges = [
        ((df_max - df_min) * i) + df_min
        for i in bounds
    ]
    styles = []
    for i in range(1, len(bounds)):
        min_bound = ranges[i - 1]
        max_bound = ranges[i]
        backgroundColor = colorlover.scales[str(n_bins)]['seq'][dark_color][i - 1]
        # Use light text on the darker half of the color scale for contrast.
        color = 'white' if i > len(bounds) / 2. else 'inherit'
        for column in df_numeric_columns:
            # Triple braces escape to a literal {column} in the DataTable
            # filter query syntax; the last bin has no upper bound.
            styles.append({
                'if': {
                    'filter_query': (
                        '{{{column}}} >= {min_bound}' +
                        (' && {{{column}}} < {max_bound}' if (i < len(bounds) - 1) else '')
                    ).format(column=column, min_bound=min_bound, max_bound=max_bound),
                    'column_id': column
                },
                'backgroundColor': backgroundColor,
                'color': color
            })
    return styles
# Rows for the scoring table, computed once at startup over the full pool.
table_data = prepare_table(entry_results, special_results, number_simulations)
figures = [
html.Header(children=[
html.Ul(children=[
html.Li(children=
html.A(href='https://github.com/codydegen/march_madness', children='GitHub')),
html.Li(children=
html.A(href='mailto:<EMAIL>', children='Contact Me')),
], id='header-list'
)
]),
html.H1('Simulation Of '+gender.capitalize()[:-1]+'\'s March Madness Brackets: '+str(year)),
html.P(children=['A pool of '+str(number_entries)+' brackets is simulated '+
str(number_simulations)+' times to see who has the best-performing brackets'+
' over time. Entries in ',html.Span('beige', id='beige'),' are '+
'algorithmicly generated. "most_valuable_teams" is generated using ',
html.A(href='https://projects.fivethirtyeight.com/'+str(year)+
'-march-madness-predictions/',children='538\'s Power Rating to estimate'+
' the best teams'),', "most_popular_teams" is generated using ',
html.A(href='http://fantasy.espn.com/tournament-challenge-bracket/'+
str(year)+'/en/whopickedwhom', children=' ESPN\'s most popular teams'),
' and "chalk" is generated by picking the highest seed in every matchup. '+
'Select entries in the table to visualize the picks, or see how they stack'+
' up by placement or by raw score. If you\'d like to see the results for'+
' a small group of entries or simulations, scroll to the bottom and you '+
'can see how things might change.'+
'']),
dt.DataTable(
id="scoring-table",
columns=[{"name": i, "id": i} for i in table_columns_pre]+
[{"name": i, "id": i, "type": "numeric", "format": FormatTemplate.percentage(1)} for i in table_columns_places] +
[{"name": i, "id": i, "type": "numeric", "format": FormatTemplate.percentage(1)} for i in table_columns_quintiles] +
[{"name": i, "id": i} for i in table_columns_post],
data=table_data,
row_selectable='multi',
fixed_rows={'headers': True},
selected_rows=[0],
sort_action='native',
style_cell={'textAlign': 'left',
'width': '40px'},
style_table={'height': '300px', 'overflowY': 'auto'},
style_data_conditional=discrete_background_color_bins(df(data=table_data), | |
bool
When overridden in a derived class, is called by
System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
Returns: true if the element is must be completed; otherwise, false.
"""
pass
@staticmethod
def ListenerExists(eventId):
    """
    ListenerExists(eventId: AutomationEvents) -> bool

    Gets a value that indicates whether UI Automation is listening for the
    specified event.

    eventId: One of the enumeration values.
    Returns: A boolean that indicates whether UI Automation is listening for the event.
    """
    # Generated stub: the real implementation is provided by the CLR runtime.
    pass
    def PeerFromProvider(self, *args): #cannot find CLR method
        """
        PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
        Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
        System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
        provider: The class that implements
        System.Windows.Automation.Provider.IRawElementProviderSimple.
        Returns: The System.Windows.Automation.Peers.AutomationPeer.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def ProviderFromPeer(self, *args): #cannot find CLR method
        """
        ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple
        Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
        specified System.Windows.Automation.Peers.AutomationPeer.
        peer: The automation peer.
        Returns: The proxy.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def RaiseAsyncContentLoadedEvent(self, args):
        """
        RaiseAsyncContentLoadedEvent(self: AutomationPeer, args: AsyncContentLoadedEventArgs)
        Called by the System.Windows.Automation.Peers.AutomationPeer to raise the
        System.Windows.Automation.AutomationElement.AsyncContentLoadedEvent event.
        args: The event data.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def RaiseAutomationEvent(self, eventId):
        """
        RaiseAutomationEvent(self: AutomationPeer, eventId: AutomationEvents)
        Raises an automation event.
        eventId: The event identifier.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def RaisePropertyChangedEvent(self, property, oldValue, newValue):
        """
        RaisePropertyChangedEvent(self: AutomationPeer, property: AutomationProperty, oldValue: object, newValue: object)
        Raises an event to notify the automation client of a changed property value.
        property: The property that changed.
        oldValue: The previous value of the property.
        newValue: The new value of the property.
        """
        # CLR interop stub; implemented by the .NET runtime.
        # NOTE: the parameter name 'property' shadows the builtin, but it is part
        # of the generated signature and must stay for keyword-call compatibility.
        pass
    def ResetChildrenCache(self):
        """
        ResetChildrenCache(self: AutomationPeer)
        Synchronously resets the tree of child elements by calling
        System.Windows.Automation.Peers.AutomationPeer.GetChildrenCore.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def SetFocus(self):
        """
        SetFocus(self: AutomationPeer)
        Sets the keyboard focus on the element that is associated with this automation
        peer.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def SetFocusCore(self, *args): #cannot find CLR method
        """
        SetFocusCore(self: AutomationPeer)
        When overridden in a derived class, is called by
        System.Windows.Automation.Peers.AutomationPeer.SetFocus.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
EventsSource = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets an System.Windows.Automation.Peers.AutomationPeer that is reported to the automation client as a source for all the events that come from this System.Windows.Automation.Peers.AutomationPeer.
Get: EventsSource(self: AutomationPeer) -> AutomationPeer
Set: EventsSource(self: AutomationPeer) = value
"""
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class UIElementAutomationPeer(AutomationPeer):
    """
    Exposes System.Windows.UIElement types to UI Automation.
    UIElementAutomationPeer(owner: UIElement)
    """
    # Auto-generated .NET interop stub class: every body below is a placeholder;
    # the real implementations live in the CLR assembly loaded at runtime.
    @staticmethod
    def CreatePeerForElement(element):
        """
        CreatePeerForElement(element: UIElement) -> AutomationPeer
        Creates a System.Windows.Automation.Peers.UIElementAutomationPeer for the
        specified System.Windows.UIElement.
        element: The System.Windows.UIElement that is associated with this
        System.Windows.Automation.Peers.UIElementAutomationPeer.
        Returns: A System.Windows.Automation.Peers.UIElementAutomationPeer.
        """
        pass
    @staticmethod
    def FromElement(element):
        """
        FromElement(element: UIElement) -> AutomationPeer
        Gets the System.Windows.Automation.Peers.UIElementAutomationPeer for the
        specified System.Windows.UIElement.
        element: The System.Windows.UIElement that is associated with this
        System.Windows.Automation.Peers.UIElementAutomationPeer.
        Returns: The System.Windows.Automation.Peers.UIElementAutomationPeer; or null, if the
        System.Windows.Automation.Peers.UIElementAutomationPeer was not created by the
        System.Windows.Automation.Peers.UIElementAutomationPeer.CreatePeerForElement(Sys
        tem.Windows.UIElement) method.
        """
        pass
    def GetPattern(self, patternInterface):
        """
        GetPattern(self: UIElementAutomationPeer, patternInterface: PatternInterface) -> object
        Gets the control pattern for the System.Windows.UIElement that is associated
        with this System.Windows.Automation.Peers.UIElementAutomationPeer.
        patternInterface: A value from the enumeration.
        Returns: An object that implements the
        System.Windows.Automation.Provider.ISynchronizedInputProvider interface if
        patternInterface is
        System.Windows.Automation.Peers.PatternInterface.SynchronizedInput; otherwise,
        null.
        """
        pass
    @staticmethod # known case of __new__
    def __new__(self, owner):
        """ __new__(cls: type, owner: UIElement) """
        pass
    IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
    """
    Owner = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets the System.Windows.UIElement that is associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
    Get: Owner(self: UIElementAutomationPeer) -> UIElement
    """
class FrameworkElementAutomationPeer(UIElementAutomationPeer):
    """
    Exposes System.Windows.FrameworkElement types to UI Automation.
    FrameworkElementAutomationPeer(owner: FrameworkElement)
    """
    # Auto-generated .NET interop stub class; bodies are placeholders.
    @staticmethod # known case of __new__
    def __new__(self, owner):
        """ __new__(cls: type, owner: FrameworkElement) """
        pass
    IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
    """
class ButtonBaseAutomationPeer(FrameworkElementAutomationPeer):
    """ Represents a base class for exposing elements derived from System.Windows.Controls.Primitives.ButtonBase to UI Automation. """
    # Auto-generated .NET interop stub class; bodies are placeholders.
    @staticmethod # known case of __new__
    def __new__(self, *args): #cannot find CLR constructor
        """ __new__(cls: type, owner: ButtonBase) """
        pass
    IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
    """
class ButtonAutomationPeer(ButtonBaseAutomationPeer, IInvokeProvider):
"""
Exposes System.Windows.Controls.Button types to UI Automation.
ButtonAutomationPeer(owner: Button)
"""
    def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
        """
        GetAcceleratorKeyCore(self: ButtonBaseAutomationPeer) -> str
        Gets the accelerator key for the element associated with this
        System.Windows.Automation.Peers.ButtonBaseAutomationPeer. Called by
        System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
        Returns: A string containing the accelerator key.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetAccessKeyCore(self, *args): #cannot find CLR method
        """
        GetAccessKeyCore(self: UIElementAutomationPeer) -> str
        Gets the access key for the System.Windows.UIElement that is associated with
        this System.Windows.Automation.Peers.UIElementAutomationPeer.This method is
        called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
        Returns: The access key for the System.Windows.UIElement that is associated with this
        System.Windows.Automation.Peers.UIElementAutomationPeer.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
        """
        GetAutomationControlTypeCore(self: ButtonAutomationPeer) -> AutomationControlType
        Gets the control type of the element that is associated with the UI Automation
        peer.
        Returns: System.Windows.Automation.Peers.AutomationControlType.Button.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetAutomationIdCore(self, *args): #cannot find CLR method
        """
        GetAutomationIdCore(self: ButtonBaseAutomationPeer) -> str
        Gets the System.Windows.Automation.AutomationProperties.AutomationId for the
        element associated with this
        System.Windows.Automation.Peers.ButtonBaseAutomationPeer. Called by
        System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
        Returns: The string that contains the
        System.Windows.Automation.AutomationProperties.AutomationId.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetBoundingRectangleCore(self, *args): #cannot find CLR method
        """
        GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
        Gets the System.Windows.Rect that represents the bounding rectangle of the
        System.Windows.UIElement that is associated with this
        System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
        by System.Windows.Automation.Peers.AutomationPeer.GetBoundingRectangle.
        Returns: The System.Windows.Rect that contains the coordinates of the element.
        Optionally, if the element is not both a System.Windows.Interop.HwndSource and
        a System.Windows.PresentationSource, this method returns
        System.Windows.Rect.Empty.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetChildrenCore(self, *args): #cannot find CLR method
        """
        GetChildrenCore(self: UIElementAutomationPeer) -> List[AutomationPeer]
        Gets the collection of child elements of the System.Windows.UIElement that is
        associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
        This method is called by
        System.Windows.Automation.Peers.AutomationPeer.GetChildren.
        Returns: A list of child System.Windows.Automation.Peers.AutomationPeer elements.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetClassNameCore(self, *args): #cannot find CLR method
        """
        GetClassNameCore(self: ButtonAutomationPeer) -> str
        Gets the name of the control that is associated with this UI Automation peer.
        Returns: A string that contains "Button".
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetClickablePointCore(self, *args): #cannot find CLR method
        """
        GetClickablePointCore(self: UIElementAutomationPeer) -> Point
        Gets a System.Windows.Point that represents the clickable space that is on the
        System.Windows.UIElement that is associated with this
        System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
        by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
        Returns: The System.Windows.Point on the element that allows a click. The point values
        are (System.Double.NaN, System.Double.NaN) if the element is not both a
        System.Windows.Interop.HwndSource and a System.Windows.PresentationSource.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetHelpTextCore(self, *args): #cannot find CLR method
        """
        GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
        Gets the string that describes the functionality of the
        System.Windows.ContentElement that is associated with this
        System.Windows.Automation.Peers.ContentElementAutomationPeer. Called by
        System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
        Returns: The help text, usually from the System.Windows.Controls.ToolTip, or
        System.String.Empty if there is no help text.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
    def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
        """
        GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
        Tells UI Automation where in the UI Automation tree to place the hwnd being
        hosted by a Windows Presentation Foundation (WPF) element.
        Returns: This method returns the hosted hwnd to UI Automation for controls that host
        hwnd objects.
        """
        # CLR interop stub; implemented by the .NET runtime.
        pass
def GetItemStatusCore(self, *args): #cannot find CLR method
"""
GetItemStatusCore(self: UIElementAutomationPeer) -> str
Gets a string that communicates the visual | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/evaluation/evaluation.sequences.ipynb (unless otherwise specified).
__all__ = ['eval_seqreveal', 'eval_staticprofile', 'eval_reclength', 'eval_profilelength']
# Cell
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from .metrics import precision, recall, mrr
# Internal Cell
def get_test_sequences(test_data, given_k, seq_col='sequence'):
    """Return the sequences from ``test_data`` usable for evaluation.

    Only sequences strictly longer than ``abs(given_k)`` can be split into a
    user profile plus a non-empty ground truth, so shorter ones are dropped.
    """
    min_len = abs(given_k)
    long_enough = test_data[seq_col].map(len) > min_len
    return test_data.loc[long_enough, seq_col].values
# Internal Cell
def get_test_sequences_and_users(test_data, given_k, train_users, seq_col='sequence', user_col='user_id'):
    """Return test sequences and their user ids usable for evaluation.

    Keeps only rows whose sequence is strictly longer than ``abs(given_k)``
    and whose user also appears in ``train_users`` (personalized models such
    as FPMC can only score users seen at training time).
    """
    keep = (test_data[seq_col].map(len) > abs(given_k)) & test_data[user_col].isin(train_users)
    subset = test_data.loc[keep]
    return subset[seq_col].values, subset[user_col].values
# Internal Cell
def sequential_evaluation(recommender,
                          test_sequences,
                          evaluation_functions,
                          users=None,
                          given_k=1,
                          look_ahead=1,
                          top_n=10,
                          scroll=True,
                          step=1):
    """Run sequential evaluation of a recommender over a set of test sequences.

    :param recommender: the instance of the recommender to test
    :param test_sequences: the set of test sequences
    :param evaluation_functions: list of evaluation metric functions
    :param users: (optional) user ids aligned with ``test_sequences``; needed by
        personalized models like FPMC
    :param given_k: (optional) initial user-profile size, counted from the start
        of the sequence (from the end if negative); must be != 0
    :param look_ahead: (optional) how many subsequent interactions form the
        ground truth; any positive number, or 'all' for the rest of the sequence
    :param top_n: (optional) size of the recommendation list
    :param scroll: (optional) if True, repeatedly grow the profile by ``step``
        interactions and re-evaluate; if False, evaluate each sequence once
    :param step: (optional) profile growth per scrolling step
    :return: the list of the average values for each evaluation metric
    """
    if given_k == 0:
        raise ValueError('given_k must be != 0')

    totals = np.zeros(len(evaluation_functions))
    with tqdm(total=len(test_sequences)) as progress:
        for idx, seq in enumerate(test_sequences):
            user = users[idx] if users is not None else None
            if scroll:
                totals += sequence_sequential_evaluation(recommender,
                                                         seq,
                                                         evaluation_functions,
                                                         user,
                                                         given_k,
                                                         look_ahead,
                                                         top_n,
                                                         step)
            else:
                totals += evaluate_sequence(recommender,
                                            seq,
                                            evaluation_functions,
                                            user,
                                            given_k,
                                            look_ahead,
                                            top_n)
            progress.update(1)
    # average over all test sequences
    return totals / len(test_sequences)
# Internal Cell
def evaluate_sequence(recommender, seq, evaluation_functions, user, given_k, look_ahead, top_n):
    """Evaluate a single recommendation list for one test sequence.

    :param recommender: which recommender to use
    :param seq: the user profile / context
    :param evaluation_functions: metric functions applied to (ground_truth, recommendations)
    :param user: user id forwarded to the recommender (may be None)
    :param given_k: profile/ground-truth split point; if negative it counts
        elements to keep from the start (len(seq) + given_k)
    :param look_ahead: number of ground-truth elements, or 'all' for the rest
    :param top_n: recommendation list length
    :return: np.array with one score per evaluation function (zeros when the
        profile, the ground truth, or the recommendation list is empty)
    """
    split = given_k if given_k >= 0 else len(seq) + given_k
    user_profile, ground_truth = seq[:split], seq[split:]
    if look_ahead != 'all':
        ground_truth = ground_truth[:look_ahead]
    # metrics expect the ground truth as a list of single-item lists
    ground_truth = [[item] for item in ground_truth]

    if not user_profile or not ground_truth:
        # nothing to recommend from, or nothing to score against
        return np.zeros(len(evaluation_functions))

    recommendations = recommender.recommend(user_profile, user)[:top_n]
    if not recommendations:
        # recommender produced nothing for this profile
        return np.zeros(len(evaluation_functions))

    reco_list = recommender.get_recommendation_list(recommendations)
    return np.array([fn(ground_truth, reco_list) for fn in evaluation_functions])
# Internal Cell
def sequence_sequential_evaluation(recommender, seq, evaluation_functions, user, given_k, look_ahead, top_n, step):
    """Scroll the profile over one sequence and average the per-step metrics.

    Starting from a profile of ``given_k`` items (counted from the end when
    negative), the profile is grown by ``step`` items at a time and the
    recommendations are re-evaluated at every step.
    """
    start = given_k if given_k >= 0 else len(seq) + given_k
    profile_lengths = range(start, len(seq), step)
    total = 0.0
    for gk in profile_lengths:
        total += evaluate_sequence(recommender, seq, evaluation_functions, user, gk, look_ahead, top_n)
    # average over the number of scrolling steps
    return total / len(profile_lengths)
# Cell
def eval_seqreveal(train_data,
                   test_data,
                   model,
                   top_k=10,
                   ):
    """
    Evaluation with sequentially revealed user profiles.
    The user profile starts from the first GIVEN_K events (or, alternatively,
    from the last -GIVEN_K events if GIVEN_K<0). The recommendations are
    evaluated against the next LOOK_AHEAD events (the ground truth). The user
    profile is then expanded by the next STEP events, the ground truth is
    scrolled forward accordingly, and the evaluation continues until the
    sequence ends.
    In typical next-item recommendation we start with GIVEN_K=1, generate a set
    of alternatives evaluated against the next event in the sequence
    (LOOK_AHEAD=1), move forward of one step (STEP=1) and repeat until the
    sequence ends. You can set LOOK_AHEAD='all' to see what happens if you had
    to recommend a whole sequence instead of a set of alternatives to a user.
    Note:
        Metrics are averaged over each sequence first, then averaged over all test sequences.
        ``train_data`` is accepted for interface symmetry but is not used here.
    """
    GIVEN_K = 1
    LOOK_AHEAD = 1
    STEP = 1
    # Use the imported metric functions directly: eval() on their names was
    # fragile and a needless code-injection hazard.
    metric_fns = [precision, recall, mrr]

    test_sequences = get_test_sequences(test_data, GIVEN_K)
    print('{} sequences available for evaluation'.format(len(test_sequences)))

    scores = sequential_evaluation(model,
                                   test_sequences=test_sequences,
                                   given_k=GIVEN_K,
                                   look_ahead=LOOK_AHEAD,
                                   evaluation_functions=metric_fns,
                                   top_n=top_k,
                                   scroll=True,  # scrolling averages metrics over all profile lengths
                                   step=STEP)

    return {
        "Model": type(model).__name__,
        "GIVEN_K": GIVEN_K,
        "LOOK_AHEAD": LOOK_AHEAD,
        "STEP": STEP,
        f"Precision@{top_k}": scores[0],
        f"Recall@{top_k}": scores[1],
        f"MRR@{top_k}": scores[2],
    }
# Cell
def eval_staticprofile(train_data,
                       test_data,
                       model,
                       top_k=10,
                       ):
    """
    Evaluation with "static" user profiles.
    The user profile is fixed to the first GIVEN_K events (or, alternatively,
    the last -GIVEN_K events if GIVEN_K<0) and the recommendations are
    evaluated against the next LOOK_AHEAD events (the ground truth). The
    profile is not extended and the ground truth doesn't move forward, which
    gives "snapshots" of the recommendation performance for different user
    profile and ground truth lengths. LOOK_AHEAD='all' evaluates against the
    whole remaining sequence.
    Note:
        ``train_data`` is accepted for interface symmetry but is not used here.
    """
    GIVEN_K = 1
    LOOK_AHEAD = 'all'
    STEP = 1
    # Use the imported metric functions directly: eval() on their names was
    # fragile and a needless code-injection hazard.
    metric_fns = [precision, recall, mrr]

    test_sequences = get_test_sequences(test_data, GIVEN_K)
    print('{} sequences available for evaluation'.format(len(test_sequences)))

    scores = sequential_evaluation(model,
                                   test_sequences=test_sequences,
                                   given_k=GIVEN_K,
                                   look_ahead=LOOK_AHEAD,
                                   evaluation_functions=metric_fns,
                                   top_n=top_k,
                                   scroll=False  # notice that scrolling is disabled!
                                   )

    return {
        "Model": type(model).__name__,
        "GIVEN_K": GIVEN_K,
        "LOOK_AHEAD": LOOK_AHEAD,
        "STEP": STEP,
        f"Precision@{top_k}": scores[0],
        f"Recall@{top_k}": scores[1],
        f"MRR@{top_k}": scores[2],
    }
# Cell
def eval_reclength(train_data,
                   test_data,
                   model,
                   ):
    """
    Evaluation for different recommendation list lengths: analysis of next-item
    recommendation performance as a function of the recommendation list length.
    Returns one matplotlib Figure per metric (precision, recall, MRR), each
    plotting the metric against the list length.
    Note:
        This evaluation is by no means exhaustive, as the different
        hyper-parameters of the recommendation algorithm should be carefully
        tuned before drawing any conclusions.
        ``train_data`` is accepted for interface symmetry but is not used here.
    """
    GIVEN_K = 1
    LOOK_AHEAD = 1
    STEP = 1
    topk_list = [1, 5, 10, 20, 50, 100]
    metrics = ['precision', 'recall', 'mrr']
    # Use the imported metric functions directly: eval() on their names was
    # fragile and a needless code-injection hazard.
    metric_fns = [precision, recall, mrr]

    test_sequences = get_test_sequences(test_data, GIVEN_K)
    print('{} sequences available for evaluation'.format(len(test_sequences)))

    res_list = []
    for topn in topk_list:
        print('Evaluating recommendation lists with length: {}'.format(topn))
        res_tmp = sequential_evaluation(model,
                                        test_sequences=test_sequences,
                                        given_k=GIVEN_K,
                                        look_ahead=LOOK_AHEAD,
                                        evaluation_functions=metric_fns,
                                        top_n=topn,
                                        scroll=True,  # here we average over all profile lengths
                                        step=STEP)
        res_list.append((topn, list(zip(metrics, res_tmp))))

    # one figure per metric: metric value vs. recommendation list length
    results = []
    for midx, metric in enumerate(metrics):
        mvalues = [mvals[midx][1] for _, mvals in res_list]
        fig, ax = plt.subplots(figsize=(5, 5))
        ax.plot(topk_list, mvalues)
        ax.set_title(metric)
        ax.set_xticks(topk_list)
        ax.set_xlabel('List length')
        fig.tight_layout()
        results.append(fig)
        plt.close(fig)  # close the figure we just built, not whichever is "current"
    return results
# Cell
def eval_profilelength(train_data,
test_data,
model,
top_k=20,
):
"""
Evaluation for different user profile lengths. Analysis of next-item recommendation.
In next-item recommendation, we analyse the performance of the recommender system in the
scenario of next-item recommendation over the following dimensions:
- the length of | |
sigma' % (gate_half,nsigma))
#gate_half = nsigma*abs_dev_med
#logger.debug('set gate_half=%.3f for intensity gated average, which is %.3f * abs_dev_med' % (gate_half,nsigma))
# 2nd loop over recs in block to evaluate gated parameters
logger.debug('begin 2nd iteration')
sta_int_lo = np.zeros(shape, dtype=np.uint64)
sta_int_hi = np.zeros(shape, dtype=np.uint64)
arr_max = np.zeros(shape, dtype=block.dtype)
arr_min = np.ones (shape, dtype=block.dtype) * 0x3fff
gate_lo = arr1_u16 * int_lo
gate_hi = arr1_u16 * int_hi
#gate_hi = np.minimum(arr_av1 + gate_half, gate_hi).astype(dtype=block.dtype)
#gate_lo = np.maximum(arr_av1 - gate_half, gate_lo).astype(dtype=block.dtype)
gate_lo = np.maximum(arr_qlo, gate_lo).astype(dtype=block.dtype)
gate_hi = np.minimum(arr_qhi, gate_hi).astype(dtype=block.dtype)
cond = gate_hi>gate_lo
gate_hi[np.logical_not(cond)] +=1
#gate_hi = np.select((cond, np.logical_not(cond)), (gate_hi, gate_hi+1), 0)
logger.debug(info_ndarr(gate_lo, ' gate_lo '))
logger.debug(info_ndarr(gate_hi, ' gate_hi '))
arr_sum0 = np.zeros(shape, dtype=np.uint64)
arr_sum1 = np.zeros(shape, dtype=np.float64)
arr_sum2 = np.zeros(shape, dtype=np.float64)
#blockdbl = np.array(block, dtype=np.float64)
for nrec in range(nrecs):
raw = block[nrec,:]
rawdbl = raw.astype(dtype=np.uint64) # blockdbl[nrec,:]
logger.debug('nrec:%03d median(raw-ave): %f' % (nrec, np.median(raw.astype(dtype=np.float64) - arr_med)))
#logger.debug('nrec:%03d median(raw-ave): %.6f' % (nrec, np.median(raw.astype(dtype=np.float64) - arr_med)))
#logger.debug(info_ndarr(raw, ' raw '))
#logger.debug(info_ndarr(arr_med, ' arr_med '))
condlist = (np.logical_not(np.logical_or(raw<gate_lo, raw>gate_hi)),)
arr_sum0 += np.select(condlist, (arr1,), 0)
arr_sum1 += np.select(condlist, (rawdbl,), 0)
arr_sum2 += np.select(condlist, (np.square(rawdbl),), 0)
sta_int_lo += np.select((raw<int_lo,), (arr1,), 0)
sta_int_hi += np.select((raw>int_hi,), (arr1,), 0)
arr_max = np.maximum(arr_max, raw)
arr_min = np.minimum(arr_min, raw)
arr_av1 = divide_protected(arr_sum1, arr_sum0)
arr_av2 = divide_protected(arr_sum2, arr_sum0)
frac_int_lo = np.array(sta_int_lo/nrecs, dtype=np.float32)
frac_int_hi = np.array(sta_int_hi/nrecs, dtype=np.float32)
arr_rms = np.sqrt(arr_av2 - np.square(arr_av1))
#rms_ave = arr_rms.mean()
rms_ave = mean_constrained(arr_rms, rms_lo, rms_hi)
rms_min, rms_max = evaluate_limits(arr_rms, rmsnlo, rmsnhi, rms_lo, rms_hi, cmt='RMS')
ave_min, ave_max = evaluate_limits(arr_av1, intnlo, intnhi, int_lo, int_hi, cmt='AVE')
arr_sta_rms_hi = np.select((arr_rms>rms_max,), (arr1,), 0)
arr_sta_rms_lo = np.select((arr_rms<rms_min,), (arr1,), 0)
arr_sta_int_hi = np.select((frac_int_hi>fraclm,), (arr1,), 0)
arr_sta_int_lo = np.select((frac_int_lo>fraclm,), (arr1,), 0)
arr_sta_ave_hi = np.select((arr_av1>ave_max,), (arr1,), 0)
arr_sta_ave_lo = np.select((arr_av1<ave_min,), (arr1,), 0)
logger.info('Bad pixel status:'\
+'\n status 1: %8d pixel rms > %.3f' % (arr_sta_rms_hi.sum(), rms_max)\
+'\n status 2: %8d pixel rms < %.3f' % (arr_sta_rms_lo.sum(), rms_min)\
+'\n status 4: %8d pixel intensity > %g in more than %g fraction of events' % (arr_sta_int_hi.sum(), int_hi, fraclm)\
+'\n status 8: %8d pixel intensity < %g in more than %g fraction of events' % (arr_sta_int_lo.sum(), int_lo, fraclm)\
+'\n status 16: %8d pixel average > %g' % (arr_sta_ave_hi.sum(), ave_max)\
+'\n status 32: %8d pixel average < %g' % (arr_sta_ave_lo.sum(), ave_min)\
)
#0/1/2/4/8/16/32 for good/hot-rms/cold-rms/saturated/cold/average above limit/average below limit,
arr_sta = np.zeros(shape, dtype=np.uint64)
arr_sta += arr_sta_rms_hi # hot rms
arr_sta += arr_sta_rms_lo*2 # cold rms
arr_sta += arr_sta_int_hi*4 # satturated
arr_sta += arr_sta_int_lo*8 # cold
arr_sta += arr_sta_ave_hi*16 # too large average
arr_sta += arr_sta_ave_lo*32 # too small average
absdiff_av1_med = np.abs(arr_av1-arr_med)
logger.debug(info_ndarr(absdiff_av1_med, 'np.abs(arr_av1-arr_med)', first=100, last=105))
logger.info('estimator of difference between gated average and median np.median(np.abs(arr_av1-arr_med)): %.3f' % np.median(absdiff_av1_med))
cond = absdiff_av1_med > med_abs_dev
arr_av1[cond] = arr_med[cond]
arr_sta_bad = np.select((cond,), (arr1,), 0)
frac_bad = arr_sta_bad.sum()/float(arr_av1.size)
logger.debug('fraction of panel pixels with gated average deviated from and replaced by median: %.6f' % frac_bad)
#logger.info('data block processing time = %.3f sec' % (time()-t0_sec))
#logger.debug(info_ndarr(arr_av1, 'arr_av1 [100:105] ', first=100, last=105))
#logger.debug(info_ndarr(arr_rms, 'pixel_rms [100:105] ', first=100, last=105))
#logger.debug(info_ndarr(arr_sta, 'pixel_status[100:105] ', first=100, last=105))
#logger.debug(info_ndarr(arr_med, 'arr mediane [100:105] ', first=100, last=105))
return arr_av1, arr_rms, arr_sta
#===
#===
#===
def proc_block(block, **kwa):
    """Dark data 1st stage processing to define gate limits.

    block.shape = (nrecs, <raw-detector-shape>),
    where <raw-detector-shape> can be per segment (352, 384) or per detector (nsegs, 352, 384)
    Returns segment/detector shaped arrays of gate_lo, gate_hi, arr_med, arr_abs_dev
    """
    exp = kwa.get('exp', None)
    detname = kwa.get('det', None)
    int_lo = kwa.get('int_lo', 1) # lowest intensity accepted for dark evaluation
    int_hi = kwa.get('int_hi', 16000) # highest intensity accepted for dark evaluation
    #intnlo = kwa.get('intnlo', 6.0) # intensity distribution number-of-sigmas low
    #intnhi = kwa.get('intnhi', 6.0) # intensity distribution number-of-sigmas high
    #rms_lo = kwa.get('rms_lo', 0.001) # rms distribution low
    #rms_hi = kwa.get('rms_hi', 16000) # rms distribution high
    #rmsnlo = kwa.get('rmsnlo', 6.0) # rms distribution number-of-sigmas low
    #rmsnhi = kwa.get('rmsnhi', 6.0) # rms distribution number-of-sigmas high
    #fraclm = kwa.get('fraclm', 0.1) # allowed fraction limit
    fraclo = kwa.get('fraclo', 0.05) # fraction of statistics below low gate limit
    frachi = kwa.get('frachi', 0.95) # fraction of statistics below high gate limit
    frac05 = 0.5 # median quantile
    #nrecs1 = kwa.get('nrecs1', None) # number of records for the 1st stage processing
    logger.debug('in proc_dark_block for exp=%s det=%s, block.shape=%s' % (exp, detname, str(block.shape)))
    logger.info(info_ndarr(block, 'begin pricessing of the data block', first=100, last=105))
    t0_sec = time()
    #nrecs1, ny, nx = block.shape[0]
    nrecs1= block.shape[0]
    shape = block.shape[1:] #(ny, nx)
    #if nrecs1 is None or nrecs1>nrecs: nrecs1 = nrecs
    # constant helper arrays in the per-pixel shape
    arr1_u16 = np.ones(shape, dtype=np.uint16)
    arr1 = np.ones(shape, dtype=np.uint64)
    t1_sec = time()
    """
    NOTE:
    - our data is uint16.
    - np.median(block, axis=0) or np.quantile(...,interpolation='linear') return result rounded to int
    - in order to return interpolated float values apply the trick:
    data_block + random [0,1)-0.5
    - this would distort data in the range [-0.5,+0.5) ADU, but would allow
    to get better interpolation for median and quantile values
    - use nrecs1 (< nrecs) due to memory and time consumption
    """
    #blockf64 = np.random.random(block.shape) - 0.5 + block
    #logger.debug(info_ndarr(blockf64, '1-st stage conversion uint16 to float64,'\
    # +' add random [0,1)-0.5 time = %.3f sec'%\
    # (time()-t1_sec), first=100, last=105))
    blockf64 = block
    # per-pixel median and low/high quantiles over the record axis.
    # NOTE(review): np.quantile's 'interpolation' kwarg is deprecated since
    # NumPy 1.22 in favor of 'method' -- confirm the target NumPy version.
    #arr_med = np.median(block, axis=0)
    arr_med = np.quantile(blockf64, frac05, axis=0, interpolation='linear')
    arr_qlo = np.quantile(blockf64, fraclo, axis=0, interpolation='lower')
    arr_qhi = np.quantile(blockf64, frachi, axis=0, interpolation='higher')
    logger.debug('block array median/quantile(frac) for med, qlo, qhi time = %.3f sec' % (time()-t1_sec))
    # scalar medians over pixels, for logging only
    med_med = np.median(arr_med)
    med_qlo = np.median(arr_qlo)
    med_qhi = np.median(arr_qhi)
    # per-pixel median absolute deviation from the median -- spectral peak width estimator
    arr_dev_3d = block[:,] - arr_med # .astype(dtype=np.float64)
    arr_abs_dev = np.median(np.abs(arr_dev_3d), axis=0)
    med_abs_dev = np.median(arr_abs_dev)
    s = 'proc_block pre-processing time %.3f sec' % (time()-t0_sec)\
    + '\n results for median over pixels intensities:'\
    + '\n %.3f fraction of the event spectrum is below %.3f ADU - pedestal estimator' % (frac05, med_med)\
    + '\n %.3f fraction of the event spectrum is below %.3f ADU - gate low limit' % (fraclo, med_qlo)\
    + '\n %.3f fraction of the event spectrum is below %.3f ADU - gate upper limit' % (frachi, med_qhi)\
    + '\n event spectrum spread median(abs(raw-med)): %.3f ADU - spectral peak width estimator' % med_abs_dev
    logger.info(s)
    # gates: quantile limits clipped to the absolute [int_lo, int_hi] window,
    # cast back to the raw dtype
    gate_lo = arr1_u16 * int_lo
    gate_hi = arr1_u16 * int_hi
    gate_lo = np.maximum(np.floor(arr_qlo), gate_lo).astype(dtype=block.dtype)
    gate_hi = np.minimum(np.ceil(arr_qhi), gate_hi).astype(dtype=block.dtype)
    # guarantee gate_hi > gate_lo everywhere (degenerate pixels get a 1-ADU gate)
    cond = gate_hi>gate_lo
    gate_hi[np.logical_not(cond)] +=1
    logger.debug('proc_block results'\
    +info_ndarr(arr_med, '\n arr_med[100:105]', first=100, last=105)\
    +info_ndarr(arr_abs_dev, '\n abs_dev[100:105]', first=100, last=105)\
    +info_ndarr(gate_lo, '\n gate_lo[100:105]', first=100, last=105)\
    +info_ndarr(gate_hi, '\n gate_hi[100:105]', first=100, last=105))
    #+info_ndarr(arr_qlo, '\n arr_qlo[100:105]', first=100, last=105)\
    #+info_ndarr(arr_qhi, '\n arr_qhi[100:105]', first=100, last=105)\
    return gate_lo, gate_hi, arr_med, arr_abs_dev
class DarkProc(object):
"""dark data accumulation and processing
"""
    def __init__(self, **kwa):
        """Cache processing parameters from keyword arguments.

        All thresholds are optional; defaults mirror the module-level
        proc_block()/proc_dark_block defaults.
        """
        self.nrecs = kwa.get('nrecs',1000)     # total number of records to accumulate
        self.nrecs1 = kwa.get('nrecs1',100)    # number of records for the 1st stage processing
        self.plotim = kwa.get('plotim', 1)     # plotting control bitword
        self.savebw = kwa.get('savebw', 0xffff) # save control bitword
        self.fraclm = kwa.get('fraclm', 0.1)   # allowed fraction limit
        self.int_lo = kwa.get('int_lo', 1) # lowest intensity accepted for dark evaluation
        self.int_hi = kwa.get('int_hi', 16000) # highest intensity accepted for dark evaluation
        self.intnlo = kwa.get('intnlo', 6.0) # intensity distribution number-of-sigmas low
        self.intnhi = kwa.get('intnhi', 6.0) # intensity distribution number-of-sigmas high
        self.rms_lo = kwa.get('rms_lo', 0.001) # rms distribution low
        self.rms_hi = kwa.get('rms_hi', 16000) # rms distribution high
        self.rmsnlo = kwa.get('rmsnlo', 6.0) # rms distribution number-of-sigmas low
        self.rmsnhi = kwa.get('rmsnhi', 6.0) # rms distribution number-of-sigmas high
        self.status = 0 # 0/1/2 stage
        self.kwa = kwa     # keep full kwargs for the 1st-stage proc_block() call
        self.block = None  # record buffer, allocated elsewhere
        self.irec = -1     # index of the current record in self.block
def accumulate_block(self, raw):
    """Store one event's raw frame at the current record index of the block.

    Assumes self.block is already allocated and self.irec was advanced by
    the caller before this is invoked.
    """
    self.block[self.irec,:] = raw # & M14 - already done
def proc_block(self):
    """Run the module-level proc_block over the accumulated data, cache the
    resulting gates/median/abs-deviation arrays, and log a timing summary.
    """
    t0_sec = time()
    self.gate_lo, self.gate_hi, self.arr_med, self.abs_dev = proc_block(self.block, **self.kwa)
    summary = 'data block processing total time %.3f sec' % (time()-t0_sec)
    for arr, label in ((self.arr_med, '\n  arr_med[100:105]'),
                       (self.abs_dev, '\n  abs_dev[100:105]'),
                       (self.gate_lo, '\n  gate_lo[100:105]'),
                       (self.gate_hi, '\n  gate_hi[100:105]')):
        summary += info_ndarr(arr, label, first=100, last=105)
    logger.info(summary)
def init_proc(self):
    """Allocate the stage-2 accumulation arrays, sized and typed from the
    stage-1 results (self.arr_med / self.gate_lo), and clip the gates to
    the configured intensity limits.

    NOTE: the original allocated arr_sum0/1/2 twice (two identical
    np.zeros calls per array); they are allocated once here.
    """
    shape_raw = self.arr_med.shape
    dtype_raw = self.gate_lo.dtype
    logger.info('Stage 2 initialization for raw shape %s and dtype %s' % (str(shape_raw), str(dtype_raw)))
    # per-pixel accumulators: event count, sum, and sum of squares
    self.arr_sum0 = np.zeros(shape_raw, dtype=np.uint64)
    self.arr_sum1 = np.zeros(shape_raw, dtype=np.float64)
    self.arr_sum2 = np.zeros(shape_raw, dtype=np.float64)
    # constant helper arrays reused in per-event arithmetic
    self.arr0 = np.zeros(shape_raw, dtype=dtype_raw)
    self.arr1 = np.ones(shape_raw, dtype=dtype_raw)
    self.arr1u64 = np.ones(shape_raw, dtype=np.uint64)
    # per-pixel counters of events below/above the intensity gates
    self.sta_int_lo = np.zeros(shape_raw, dtype=np.uint64)
    self.sta_int_hi = np.zeros(shape_raw, dtype=np.uint64)
    # clip the stage-1 gates to the user-configured intensity limits
    self.gate_hi = np.minimum(self.arr1 * self.int_hi, self.gate_hi)
    self.gate_lo = np.maximum(self.arr1 * self.int_lo, self.gate_lo)
    # running per-pixel extrema over accumulated events
    self.arr_max = np.zeros(shape_raw, dtype=dtype_raw)
    self.arr_min = np.ones(shape_raw, dtype=dtype_raw) * 0xffff
def summary(self):
| |
r"""
Phrases that contain meanings of :class:`.Statement`\s.
Can contain references to other :class:`.Statement`\s,
to numeric values, to dates, or to quantities (with the use of
the `pint <https://pint.readthedocs.io/en/>`_ library).
"""
from __future__ import annotations
from abc import ABCMeta
from itertools import product
from string import Template
from typing import Any, Dict, Mapping
from typing import List, Optional, Sequence, Set, Tuple
from pydantic import BaseModel, Extra
from nettlesome.terms import Comparable, TermSequence
from nettlesome.terms import Term
class StatementTemplate(Template):
    r"""
    A text template for a Predicate.

    Should include placeholders for any replaceable :class:`~nettlesome.terms.Term`\s
    that can be substituted into the :class:`~nettlesome.predicates.Predicate`\.
    """

    def __init__(self, template: str, make_singular: bool = True) -> None:
        r"""
        Identify placeholders in template text, and make verbs singular if needed.

        >>> school_template = StatementTemplate(
        ...     "$group were at school", make_singular=True)
        >>> str(school_template)
        'StatementTemplate("$group was at school")'

        Only "were" immediately after a placeholder is singularized:

        >>> text = "$group thought the exams were difficult"
        >>> exams_template = StatementTemplate(text, make_singular=True)
        >>> str(exams_template)
        'StatementTemplate("$group thought the exams were difficult")'

        :param template:
            text for creating a :py:class:`string.Template`
        :param make_singular:
            whether "were" after a placeholder should be converted to
            singular "was"
        """
        super().__init__(template)
        # Collect placeholder names in order of first appearance, no duplicates.
        found = (
            match.group("named") or match.group("braced")
            for match in self.pattern.finditer(self.template)
        )
        self._placeholders = list(dict.fromkeys(name for name in found if name))
        if make_singular:
            self.make_content_singular()

    def __str__(self) -> str:
        return 'StatementTemplate("%s")' % self.template

    def make_content_singular(self) -> None:
        """Convert template text for self.context to singular "was"."""
        for name in self.placeholders:
            singular = "$" + name + " was"
            for plural_form in ("$" + name + " were", "${" + name + "} were"):
                self.template = self.template.replace(plural_form, singular)
        return None

    def get_template_with_plurals(self, context: Sequence[Term]) -> str:
        """
        Get a version of self with "was" replaced by "were" for any plural terms.

        Does not modify this object's template attribute.
        """
        text = self.template[:]
        names = self.placeholders
        self._check_number_of_terms(names, context)
        for position, term in enumerate(context):
            if term.__dict__.get("plural") is True:
                plural = "$" + names[position] + " were"
                for singular_form in (
                    "$" + names[position] + " was",
                    "${" + names[position] + "} was",
                ):
                    text = text.replace(singular_form, plural)
        return text

    @property
    def placeholders(self) -> List[str]:
        """List substrings of template text marked as placeholders."""
        return self._placeholders

    def get_term_sequence_from_mapping(
        self, term_mapping: Mapping[str, Term]
    ) -> TermSequence:
        """Get an ordered list of terms from a mapping of placeholder names to terms."""
        ordered = [term_mapping[name] for name in self.placeholders]
        return TermSequence(ordered)

    def _check_number_of_terms(
        self, placeholders: List[str], context: Sequence[Term]
    ) -> None:
        """Raise ValueError unless each distinct placeholder has one context term."""
        if len(set(placeholders)) != len(context):
            raise ValueError(
                f"The number of terms passed in 'context' ({len(context)}) must be equal to the "
                f"number of placeholders in the StatementTemplate ({len(placeholders)})."
            )
        return None

    def mapping_placeholder_to_term(
        self, context: Sequence[Term]
    ) -> Dict[str, Comparable]:
        """
        Get a mapping of template placeholders to context terms.

        :param context:
            a list of context :class:`.factors.Factor`/s, in the same
            order they appear in the template string.
        """
        self._check_number_of_terms(self.placeholders, context)
        return dict(zip(self.placeholders, context))

    def mapping_placeholder_to_term_name(
        self, context: Sequence[Term]
    ) -> Dict[str, str]:
        """
        Get a mapping of template placeholders to the names of their context terms.

        :param context:
            context terms in the same order they appear in the template string.
        """
        return {
            name: term.short_string
            for name, term in self.mapping_placeholder_to_term(context).items()
        }

    def substitute_with_plurals(self, terms: Sequence[Term]) -> str:
        """
        Update template text with strings representing Comparable terms.

        :param terms:
            terms with `.short_string` attributes to substitute into the
            template, and optionally with `plural` attributes to indicate
            whether to change the word "was" to "were"
        :returns:
            updated version of template text
        """
        pluralized = self.get_template_with_plurals(context=terms)
        name_by_placeholder = self.mapping_placeholder_to_term_name(context=terms)
        return self.__class__(pluralized, make_singular=False).substitute(name_by_placeholder)
class PhraseABC(metaclass=ABCMeta):
r"""Abstract base class for phrases that can be compared like Predicates."""
content: str
truth: Optional[bool]
def contradicts(self, other: Any) -> bool:
    r"""
    Test whether ``other`` and ``self`` have contradictory meanings.

    Determined only by the ``truth`` value, the exact template content,
    and whether the placeholders indicate interchangeable terms: the two
    phrases must mean the same thing as true predicates and carry
    explicit, differing ``truth`` values.
    """
    if not self._same_meaning_as_true_predicate(other):
        return False
    both_determined = self.truth is not None and other.truth is not None
    return both_determined and self.truth != other.truth
def means(self, other: Any) -> bool:
    """
    Test if ``self`` and ``other`` have identical meanings.

    Returns False for any difference in the template text other than the
    placeholder names, for any difference in which placeholders are
    marked as interchangeable, and for any difference in ``truth``.

    >>> talked = Predicate(content="$speaker talked to $listener")
    >>> spoke = Predicate(content="$speaker spoke to $listener")
    >>> talked.means(spoke)
    False

    :param other:
        an object to compare
    :returns:
        whether ``other`` is another Predicate with the same text,
        truth value, and pattern of interchangeable placeholders
    """
    return self._same_meaning_as_true_predicate(other) and self.truth == other.truth
def implies(self, other: Any) -> bool:
    """
    Test whether ``self`` implies ``other``.

    A Predicate implies another Predicate only if it
    :meth:`~nettlesome.predicates.Predicate.means` the other Predicate,
    or if the other Predicate has the same text but a truth value of
    None (a "whether" statement).

    >>> lived_at = Predicate(
    ...     content="$person lived at $place",
    ...     truth=True)
    >>> whether_lived_at = Predicate(
    ...     content="$person lived at $place",
    ...     truth=None)
    >>> lived_at.implies(whether_lived_at)
    True
    >>> whether_lived_at.implies(lived_at)
    False

    :param other:
        an object to compare for implication.
    :returns:
        whether ``other`` is another Predicate with the
        same text, and the same truth value or no truth value.
    """
    # a "whether" statement (truth None) implies nothing
    if self.truth is None:
        return False
    if not isinstance(other, self.__class__):
        return False
    if not self._same_meaning_as_true_predicate(other):
        return False
    return other.truth is None or self.truth == other.truth
def __gt__(self, other: Any) -> bool:
    r"""
    Alias for :meth:`~nettlesome.predicates.Predicate.implies`\.

    Allows ``self > other`` as shorthand for strict implication.
    """
    return self.implies(other)
def __ge__(self, other: Any) -> bool:
    r"""
    Test whether ``self`` either implies or has the same meaning as ``other``.

    :param other:
        an object to compare
    :returns:
        whether ``other`` is another Predicate that ``self`` either
        :meth:`~nettlesome.predicates.Predicate.means`
        or :meth:`~nettlesome.predicates.Predicate.implies`
    """
    return self.means(other) or self.implies(other)
def __len__(self):
    r"""
    Get the number of Terms expected.

    Also called the linguistic valency, arity, or adicity.

    :returns:
        the number of :class:`~nettlesome.terms.Term`\s that can fit
        in the placeholders
        in the :class:`~nettlesome.predicates.StatementTemplate`\.
    """
    distinct_placeholders = set(self.template.placeholders)
    return len(distinct_placeholders)
@property
def template(self) -> StatementTemplate:
    """
    A text template for the predicate.

    Built fresh from ``self.content`` on every access, with "were"
    after placeholders normalized to "was".

    :returns:
        a :class:`StatementTemplate` object
    """
    return StatementTemplate(self.content, make_singular=True)
def content_without_placeholders(self) -> str:
    """
    Get template text with placeholders replaced by identical bracket pairs.

    Produces a string that will evaluate equal for two templates with
    identical non-placeholder text.
    """
    blanked = dict.fromkeys(self.template.placeholders, "{}")
    return self.template.substitute(**blanked)
def _content_with_terms(self, terms: Sequence[Term]) -> str:
    r"""
    Make a sentence by filling in placeholders with names of Factors.

    :param terms:
        terms to be mentioned in the context of
        this Predicate. They do not need to be type :class:`.Entity`
    :returns:
        a sentence created by substituting string representations
        of terms for the placeholders in the content template
    """
    return self.template.substitute_with_plurals(terms)
def same_content_meaning(self, other: PhraseABC) -> bool:
    """
    Test if :attr:`~Predicate.content` strings of ``self`` and ``other`` have same meaning.

    Comparison is case-insensitive and ignores placeholder names.

    :param other:
        another :class:`Predicate` being compared to ``self``
    :returns:
        whether ``self`` and ``other`` have :attr:`~Predicate.content` strings
        similar enough to be considered to have the same meaning.
    """
    own_text = self.content_without_placeholders().lower()
    other_text = other.content_without_placeholders().lower()
    return own_text == other_text
def same_term_positions(self, other: PhraseABC) -> bool:
    """Test if self and other have same positions for interchangeable Terms."""
    own_positions = list(self.term_positions().values())
    other_positions = list(other.term_positions().values())
    return own_positions == other_positions
def _same_meaning_as_true_predicate(self, other: PhraseABC) -> bool:
    """Test if self and other mean the same if they are both True."""
    # comparing against a non-phrase is a programming error, not a mismatch
    if not isinstance(other, PhraseABC):
        raise TypeError(
            f"Type {self.__class__.__name__} can't imply, contradict, or "
            f"have same meaning as type {other.__class__.__name__}"
        )
    return (
        isinstance(other, self.__class__)
        and self.same_content_meaning(other)
        and self.same_term_positions(other)
    )
def term_positions(self) -> Dict[str, Set[int]]:
"""
Create list of positions that each term could take without changing Predicate's meaning.
Assumes that if placeholders | |
#
#
# Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
# This module is free software, and you may redistribute it and/or modify
# under the same terms as Python, so long as this copyright message and
# disclaimer are retained in their original form.
#
# IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
# OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
"""This module defines a backend that saves the hyperdatabase in a
database chosen by anydbm. It is guaranteed to always be available in python
versions >2.1.1 (the dumbdbm fallback in 2.1.1 and earlier has several
serious bugs, and is not available)
"""
__docformat__ = 'restructuredtext'
import os, marshal, re, weakref, string, copy, time, shutil, logging
from roundup.anypy.dbm_ import anydbm, whichdb
from roundup.anypy.strings import b2s, bs2b, repr_export, eval_import, is_us
from roundup import hyperdb, date, password, roundupdb, security, support
from roundup.mlink_expr import Expression
from roundup.backends import locking
from roundup.i18n import _
from roundup.backends.blobfiles import FileStorage
from roundup.backends.sessions_dbm import Sessions, OneTimeKeys
from roundup.backends.indexer_common import get_indexer
from hashlib import md5
def db_exists(config):
    """Return 1 if a user-class database file exists under config.DATABASE, else 0."""
    # check for the user db (dbm may or may not have appended '.db')
    for candidate in ('nodes.user', 'nodes.user.db'):
        if os.path.exists(os.path.join(config.DATABASE, candidate)):
            return 1
    return 0
def db_nuke(config):
    """Remove the whole database directory tree for this tracker."""
    shutil.rmtree(config.DATABASE)
#
# Now the database
#
class Database(FileStorage, hyperdb.Database, roundupdb.Database):
"""A database for storing records containing flexible data types.
Transaction stuff TODO:
- check the timestamp of the class file and nuke the cache if it's
modified. Do some sort of conflict checking on the dirty stuff.
- perhaps detect write collisions (related to above)?
attributes:
dbtype:
holds the value for the type of db. It is used by indexer to
identify the database type so it can import the correct indexer
module when using native text search mode.
"""
dbtype = "anydbm"
# used by migrate roundup_admin command. Is a no-op for anydbm.
# but needed to stop traceback in admin.
db_version_updated = False
def __init__(self, config, journaltag=None):
    """Open a hyperdatabase given a specifier to some storage.

    The 'storagelocator' is obtained from config.DATABASE.
    The meaning of 'storagelocator' depends on the particular
    implementation of the hyperdatabase.  It could be a file name,
    a directory path, a socket descriptor for a connection to a
    database over the network, etc.

    The 'journaltag' is a token that will be attached to the journal
    entries for any edits done on the database.  If 'journaltag' is
    None, the database is opened in read-only mode: the Class.create(),
    Class.set(), Class.retire(), and Class.restore() methods are
    disabled.
    """
    FileStorage.__init__(self, config.UMASK)
    self.config, self.journaltag = config, journaltag
    self.dir = config.DATABASE
    self.classes = {}
    self.cache = {}         # cache of nodes loaded or created
    self.stats = {'cache_hits': 0, 'cache_misses': 0, 'get_items': 0,
                  'filtering': 0}
    self.dirtynodes = {}    # keep track of the dirty nodes by class
    self.newnodes = {}      # keep track of the new nodes by class
    self.destroyednodes = {}# keep track of the destroyed nodes by class
    self.transactions = []
    self.indexer = get_indexer(config, self)
    self.security = security.Security(self)
    os.umask(config.UMASK)

    # make sure the database directory exists
    if not os.path.isdir(self.config.DATABASE):
        os.makedirs(self.config.DATABASE)

    # lock it, recording our pid so stale locks can be diagnosed
    lockfilenm = os.path.join(self.dir, 'lock')
    self.lockfile = locking.acquire_lock(lockfilenm)
    self.lockfile.write(str(os.getpid()))
    self.lockfile.flush()

    # session and one-time-key managers are created lazily
    # (see getSessionManager / getOTKManager)
    self.Session = None
    self.Otk = None
def post_init(self):
    """Called once the schema initialisation has finished.

    Rebuilds the full-text index if the indexer reports it is stale.
    """
    super(Database, self).post_init()

    # reindex the db if necessary
    if self.indexer.should_reindex():
        self.reindex()
def refresh_database(self):
    """Rebuild the database.

    For the anydbm backend the stored data needs no conversion; only the
    full-text index is rebuilt.
    """
    self.reindex()
def getSessionManager(self):
    """Return the session manager, creating it on first use."""
    self.Session = self.Session or Sessions(self)
    return self.Session
def getOTKManager(self):
    """Return the one-time-key manager, creating it on first use."""
    self.Otk = self.Otk or OneTimeKeys(self)
    return self.Otk
def reindex(self, classname=None, show_progress=False):
    """Rebuild the full-text index for one class, or for all classes.

    :param classname: restrict reindexing to this class if given
    :param show_progress: wrap iteration in a console progress display
    """
    if classname:
        targets = [self.getclass(classname)]
    else:
        targets = self.classes.values()
    for klass in targets:
        nodeids = klass.list()
        if show_progress:
            nodeids = support.Progress('Reindex %s'%klass.classname, nodeids)
        for nodeid in nodeids:
            klass.index(nodeid)
    self.indexer.save_index()
def __repr__(self):
    """Terse representation showing only the backend name and instance identity."""
    return '<back_anydbm instance at %x>'%id(self)
#
# Classes
#
def __getattr__(self, classname):
    """A convenient way of calling self.getclass(classname)."""
    try:
        return self.classes[classname]
    except KeyError:
        raise AttributeError(classname)
def addclass(self, cl):
    """Register a hyperdb class and grant the standard permissions for it."""
    cn = cl.classname
    if cn in self.classes:
        raise ValueError(_('Class "%s" already defined.'%cn))
    self.classes[cn] = cl

    # add default Edit and View permissions
    for permission, verb in (('Create', 'create'),
                             ('Edit', 'edit'),
                             ('View', 'access'),
                             ('Retire', 'retire')):
        self.security.addPermission(
            name=permission, klass=cn,
            description="User is allowed to %s %s" % (verb, cn))
def getclasses(self):
    """Return a list of the names of all existing classes."""
    return sorted(self.classes)
def getclass(self, classname):
    """Get the Class object representing a particular class.

    If 'classname' is not a valid class name, a KeyError is raised.
    """
    if classname not in self.classes:
        raise KeyError('There is no class called "%s"'%classname)
    return self.classes[classname]
#
# Class DBs
#
def clear(self):
    """Delete all database contents.

    Removes the per-class node and journal files and the id-counter file.
    Bug fix: the loop variable over ('nodes', 'journals') was unused and
    the path hard-coded 'journals.%s', so the 'nodes.*' files were never
    removed.
    """
    logging.getLogger('roundup.hyperdb.backend').info('clear')
    for cn in self.classes:
        for prefix in 'nodes', 'journals':
            path = os.path.join(self.dir, '%s.%s'%(prefix, cn))
            if os.path.exists(path):
                os.remove(path)
            elif os.path.exists(path+'.db'):    # dbm appends .db
                os.remove(path+'.db')
    # reset id sequences
    path = os.path.join(os.getcwd(), self.dir, '_ids')
    if os.path.exists(path):
        os.remove(path)
    elif os.path.exists(path+'.db'):    # dbm appends .db
        os.remove(path+'.db')
def getclassdb(self, classname, mode='r'):
    """Grab a connection to the class db that will be used for
    multiple actions.

    :param classname: name of the class whose node store to open
    :param mode: dbm open mode, read-only ('r') by default
    """
    return self.opendb('nodes.%s'%classname, mode)
def determine_db_type(self, path):
    """Determine which DB module wrote the class file at ``path``.

    Returns the module name, 'dbm' when only a '.db' companion file
    exists, or '' when no file exists at all.
    """
    if os.path.exists(path):
        db_type = whichdb(path)
        if not db_type:
            raise hyperdb.DatabaseError(_("Couldn't identify database type"))
        return db_type
    if os.path.exists(path+'.db'):
        # if the path ends in '.db', it's a dbm database, whether
        # anydbm says it's dbhash or not!
        return 'dbm'
    return ''
def opendb(self, name, mode):
    """Low-level database opener that gets around anydbm/dbm
    eccentricities.

    :param name: base file name of the store, relative to self.dir
    :param mode: dbm open mode ('r', 'w', 'c')
    :returns: an open dbm-style database object
    """
    # figure the class db type
    path = os.path.join(os.getcwd(), self.dir, name)
    db_type = self.determine_db_type(path)

    # new database? let anydbm pick the best dbm
    # in Python 3+ the "dbm" ("anydbm" to us) module already uses the
    # whichdb() function to do this
    if not db_type or hasattr(anydbm, 'whichdb'):
        if __debug__:
            logging.getLogger('roundup.hyperdb.backend').debug(
                "opendb anydbm.open(%r, 'c')"%path)
        return anydbm.open(path, 'c')

    # in Python <3 anydbm was a little dumb so manually open the
    # database with the correct module
    try:
        dbm = __import__(db_type)
    except ImportError:
        if db_type == 'gdbm':
            # gdbm was renamed to dbm.gnu in Python 3
            try:
                dbm = __import__('dbm.gnu')
            except ImportError:
                raise hyperdb.DatabaseError(_(
                    "Couldn't open database - the required module '%s' "
                    "(as dbm.gnu) is not available")%db_type)
        else:
            raise hyperdb.DatabaseError(_("Couldn't open database - the "
                "required module '%s' is not available")%db_type)
    if __debug__:
        logging.getLogger('roundup.hyperdb.backend').debug(
            "opendb %r.open(%r, %r)"%(db_type, path, mode))
    return dbm.open(path, mode)
#
# Node IDs
#
def newid(self, classname):
    """Generate a new id for the given class."""
    # open the ids DB - create it if it doesn't exist
    db = self.opendb('_ids', 'c')
    if classname not in db:
        # the count() bit is transitional - older dbs won't start at 1
        newid = str(self.getclass(classname).count()+1)
    else:
        newid = str(int(db[classname]) + 1)
    db[classname] = newid
    db.close()
    return newid
def setid(self, classname, setid):
    """Set the id counter: used during import of database.

    :param classname: class whose id counter is being set
    :param setid: new value for the counter
    """
    # open the ids DB - create it if it doesn't exist
    db = self.opendb('_ids', 'c')
    db[classname] = str(setid)
    db.close()
#
# Nodes
#
def addnode(self, classname, nodeid, node):
    """Add the specified node to its class's db.

    Fills in creator/actor/creation/activity unless they are already
    present (as during an import), records the node as new, caches it,
    and persists it via savenode.
    """
    # we'll be supplied these props if we're doing an import
    if 'creator' not in node:
        # add in the "calculated" properties (dupe so we don't affect
        # calling code's node assumptions)
        node = node.copy()
        node['creator'] = self.getuid()
        node['actor'] = self.getuid()
        node['creation'] = node['activity'] = date.Date()

    self.newnodes.setdefault(classname, {})[nodeid] = 1
    self.cache.setdefault(classname, {})[nodeid] = node
    self.savenode(classname, nodeid, node)
def setnode(self, classname, nodeid, node):
    """Change the specified node: mark it dirty, refresh the cache entry,
    and persist the new value via savenode.
    """
    self.dirtynodes.setdefault(classname, {})[nodeid] = 1

    # can't set without having already loaded the node
    self.cache[classname][nodeid] = node
    self.savenode(classname, nodeid, node)
def savenode(self, classname, nodeid, node):
""" perform the saving of data specified by the set/addnode
"""
if __debug__:
logging.getLogger('roundup.hyperdb.backend').debug(
'save %s%s %r'%(classname, nodeid, node))
| |
#
"""Optimization routines."""
import contextlib
import functools
import os
import sys
import warnings
from pathlib import Path
from typing import Any, Callable, Iterable, Iterator, Optional, Tuple, Union
import numpy as np
import scipy.optimize
from .. import options
from ..utilities.basics import Array, Options, SolverStats, StringRepresentation, format_options
# objective function types
ObjectiveResults = Tuple[float, Optional[Array]]
ObjectiveFunction = Callable[[Array], ObjectiveResults]
class Optimization(StringRepresentation):
r"""Configuration for solving optimization problems.
Parameters
----------
method : `str or callable`
The optimization routine that will be used. The following routines support parameter bounds and use analytic
gradients:
- ``'knitro'`` - Uses an installed version of
`Artleys Knitro <https://www.artelys.com/solvers/knitro/>`_. Python 3 is supported by Knitro version 10.3
and newer. A number of environment variables most likely need to be configured properly, such as
``KNITRODIR``, ``ARTELYS_LICENSE``, ``LD_LIBRARY_PATH`` (on Linux), and ``DYLD_LIBRARY_PATH`` (on
Mac OS X). For more information, refer to the
`Knitro installation guide <https://www.artelys.com/docs/knitro//1_introduction/installation.html>`_.
- ``'slsqp'`` - Uses the :func:`scipy.optimize.minimize` SLSQP routine.
- ``'trust-constr'`` - Uses the :func:`scipy.optimize.minimize` trust-region routine.
- ``'l-bfgs-b'`` - Uses the :func:`scipy.optimize.minimize` L-BFGS-B routine.
- ``'tnc'`` - Uses the :func:`scipy.optimize.minimize` TNC routine.
The following routines also use analytic gradients but will ignore parameter bounds (not bounding the problem
may create issues if the optimizer tries out large parameter values that create overflow errors):
- ``'cg'`` - Uses the :func:`scipy.optimize.minimize` CG routine.
- ``'bfgs'`` - Uses the :func:`scipy.optimize.minimize` BFGS routine.
- ``'newton-cg'`` - Uses the :func:`scipy.optimize.minimize` Newton-CG routine.
The following routines do not use analytic gradients and will also ignore parameter bounds (without analytic
gradients, optimization will likely be much slower):
- ``'nelder-mead'`` - Uses the :func:`scipy.optimize.minimize` Nelder-Mead routine.
- ``'powell'`` - Uses the :func:`scipy.optimize.minimize` Powell routine.
The following trivial routine can be used to evaluate an objective at specific parameter values:
- ``'return'`` - Assume that the initial parameter values are the optimal ones.
Also accepted is a custom callable method with the following form::
method(initial, bounds, objective_function, iteration_callback, **options) -> (final, converged)
where ``initial`` is an array of initial parameter values, ``bounds`` is a list of ``(min, max)`` pairs for each
element in ``initial``, ``objective_function`` is a callable objective function of the form specified below,
``iteration_callback`` is a function that should be called without any arguments after each major iteration (it
is used to record the number of major iterations), ``options`` are specified below, ``final`` is an array of
optimized parameter values, and ``converged`` is a flag for whether the routine converged.
The ``objective_function`` has the following form:
objective_function(theta) -> (objective, gradient)
where ``gradient`` is ``None`` if ``compute_gradient`` is ``False``.
method_options : `dict, optional`
Options for the optimization routine.
For any non-custom ``method`` other than ``'knitro'`` and ``'return'``, these options will be passed to
``options`` in :func:`scipy.optimize.minimize`. Refer to the SciPy documentation for information about which
options are available for each optimization routine.
If ``method`` is ``'knitro'``, these options should be
`Knitro user options <https://www.artelys.com/docs/knitro//3_referenceManual/userOptions.html>`_. The
non-standard ``knitro_dir`` option can also be specified. The following options have non-standard default
values:
- **knitro_dir** : (`str`) - By default, the KNITRODIR environment variable is used. Otherwise, this
option should point to the installation directory of Knitro, which contains direct subdirectories such as
``'examples'`` and ``'lib'``. For example, on Windows this option could be
``'/Program Files/Artleys3/Knitro 10.3.0'``.
- **algorithm** : (`int`) - The optimization algorithm to be used. The default value is ``1``, which
corresponds to the Interior/Direct algorithm.
- **gradopt** : (`int`) - How the objective's gradient is computed. The default value is ``1`` if
``compute_gradient`` is ``True`` and is ``2`` otherwise, which corresponds to estimating the gradient with
finite differences.
- **hessopt** : (`int`) - How the objective's Hessian is computed. The default value is ``2``, which
corresponds to computing a quasi-Newton BFGS Hessian.
- **honorbnds** : (`int`) - Whether to enforce satisfaction of simple variable bounds. The default value is
``1``, which corresponds to enforcing that the initial point and all subsequent solution estimates satisfy
the bounds.
compute_gradient : `bool, optional`
Whether to compute an analytic objective gradient during optimization, which must be ``False`` if ``method``
does not use analytic gradients, and must be ``True`` if ``method`` is ``'newton-cg'``, which requires an
analytic gradient. By default, analytic gradients are computed. Not using an analytic gradient will likely slow
down estimation a good deal. If ``False``, an analytic gradient may still be computed once at the end of
optimization to compute optimization results.
universal_display : `bool, optional`
Whether to format optimization progress such that the display looks the same for all routines. By default, the
universal display is used and some ``method_options`` are used to prevent default displays from showing up.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/optimization.ipynb
.. raw:: latex
\end{examplenotebook}
"""
_optimizer: functools.partial
_description: str
_method_options: Options
_supports_bounds: bool
_compute_gradient: bool
_universal_display: bool
def __init__(
        self, method: Union[str, Callable], method_options: Optional[Options] = None, compute_gradient: bool = True,
        universal_display: bool = True) -> None:
    """Validate the method and set default options.

    Builds a ``functools.partial`` optimizer plus a human-readable
    description, then layers user-supplied ``method_options`` on top of
    per-method defaults.  The Knitro method additionally needs its
    installation directory located and added to ``sys.path`` before the
    solver can be imported.
    """
    # each table maps a method name to (optimizer factory, description);
    # "simple" methods use no gradients, "unbounded" ignore bounds
    simple_methods = {
        'nelder-mead': (functools.partial(scipy_optimizer), "the Nelder-Mead algorithm implemented in SciPy"),
        'powell': (functools.partial(scipy_optimizer), "the modified Powell algorithm implemented in SciPy")
    }
    unbounded_methods = {
        'cg': (functools.partial(scipy_optimizer), "the conjugate gradient algorithm implemented in SciPy"),
        'bfgs': (functools.partial(scipy_optimizer), "the BFGS algorithm implemented in SciPy"),
        'newton-cg': (functools.partial(scipy_optimizer), "the Newton-CG algorithm implemented in SciPy")
    }
    bounded_methods = {
        'l-bfgs-b': (functools.partial(scipy_optimizer), "the L-BFGS-B algorithm implemented in SciPy"),
        'tnc': (functools.partial(scipy_optimizer), "the truncated Newton algorithm implemented in SciPy"),
        'slsqp': (functools.partial(scipy_optimizer), "Sequential Least SQuares Programming implemented in SciPy"),
        'trust-constr': (functools.partial(scipy_optimizer), "trust-region routine implemented in SciPy"),
        'knitro': (functools.partial(knitro_optimizer), "an installed version of Artleys Knitro"),
        'return': (functools.partial(return_optimizer), "a trivial routine that returns the initial parameters")
    }
    methods = {**simple_methods, **unbounded_methods, **bounded_methods}

    # validate the configuration
    if method not in methods and not callable(method):
        raise ValueError(f"method must be one of {list(methods)} or a callable object.")
    if method_options is not None and not isinstance(method_options, dict):
        raise ValueError("method_options must be None or a dict.")
    if method in simple_methods and compute_gradient:
        raise ValueError(f"compute_gradient must be False when method is '{method}'.")
    if method == 'newton-cg' and not compute_gradient:
        raise ValueError(f"compute_gradient must be True when method is '{method}'.")

    # initialize class attributes
    self._compute_gradient = compute_gradient
    self._universal_display = universal_display
    self._supports_bounds = callable(method) or method in bounded_methods

    # options are by default empty
    if method_options is None:
        method_options = {}

    # options are simply passed along to custom methods
    if callable(method):
        self._optimizer = functools.partial(method)
        self._description = "a custom method"
        self._method_options = method_options
        return

    # identify the non-custom optimizer, configure arguments, and set default options
    self._method_options: Options = {}
    self._optimizer, self._description = methods[method]
    self._optimizer = functools.partial(self._optimizer, compute_gradient=compute_gradient)
    if method == 'knitro':
        self._method_options.update({
            'hessopt': 2,
            'algorithm': 1,
            'honorbnds': 1,
            'gradopt': 1 if compute_gradient else 2,
            'knitro_dir': os.environ.get('KNITRODIR'),
            'outlev': 4 if not universal_display and options.verbose else 0
        })
    elif method != 'return':
        self._optimizer = functools.partial(self._optimizer, method=method)
        # suppress the default SciPy displays when the universal display is used
        if not universal_display and options.verbose:
            self._method_options['disp'] = True
            if method in {'l-bfgs-b', 'slsqp'}:
                self._method_options['iprint'] = 2
            elif method == 'trust-constr':
                self._method_options['verbose'] = 3

    # update the default options (user-supplied options take precedence)
    self._method_options.update(method_options)

    # validate options for non-SciPy routines
    if method == 'return' and self._method_options:
        raise ValueError("The return method does not support any options.")
    if method == 'knitro':
        # get the location of the Knitro installation
        knitro_dir = self._method_options.pop('knitro_dir')
        if not isinstance(knitro_dir, (Path, str)):
            raise OSError(
                "If specified, the knitro_dir optimization option must point to the Knitro installation directory."
                "Otherwise, the KNITRODIR environment variable must be configured."
            )

        # add relevant paths so the bundled Python bindings can be imported
        for subdir in ['lib', 'examples/Python']:
            full_path = Path(knitro_dir) / subdir
            if not full_path.is_dir():
                raise OSError(
                    f"Failed to find the directory '{full_path}'. Make sure a supported version of Knitro is "
                    f"properly installed and that the KNITRODIR environment variable exists. Alternatively, the "
                    f"knitro_dir optimization option should point to the Knitro installation directory."
                )
            sys.path.append(str(full_path))

        # make sure that Knitro can be initialized
        with knitro_context_manager():
            pass
def __str__(self) -> str:
"""Format the configuration as a string."""
description = f"{self._description} {'with' if self._compute_gradient else 'without'} analytic gradients"
return f"Configured to optimize using {description} and options {format_options(self._method_options)}."
def _optimize(
| |
rows = c.fetchall()
conn.commit()
conn.close()
keys = []
for key in keys_db:
if (key == 'cash'):
keys.append("safe") # safe is not in the db (want to see it though)
if key != "json string":
keys.append(key)
TableCls = create_table('TableCls')
for key in keys:
TableCls.add_column(key, Col(key))
items = []
answer = {}
for row in rows:
js = json.loads(row[keys_db.index("json string")])
dt = datetime.datetime.strptime(row[keys_db.index("post date")], '%Y/%m/%d')
if (intyear == 1970 or dt.year == intyear):
if (js is not {}):
if ("start date" in js[0]):
dt = datetime.datetime.strptime(js[0]['start date'], '%Y/%m/%d')
col_list = []
for i in range(len(keys)):
if (keys[i] == "post date"):
col_list.append(dt.strftime("%b, %d"))
elif (keys[i] == "stock value"):
col_list.append(as_currency(row[i]))
stock = math.ceil(row[keys.index("stock value")] -.4)
safe = Safe(stock, verbose)
col_list.append(safe)
else:
if (keys_db.index("json string") != i):
col_list.append(as_currency(row[i]))
answer = dict(zip(keys, col_list))
items.append(answer)
table = TableCls(items, html_attrs = {'width':'100%','border-spacing':0})
if (verbose):
print ("***\n")
defaults, types = GetDefaults(False)
export_options = ""
if "snap shot" in defaults:
export_options += '<option value="AIM activity">AIM activity</option>'
export_options += '<option value="Archive snapshot {0}">Archive snapshot {0}</option>'.format(defaults['snap shot'], defaults['snap shot'])
export_options += '<option value="Current portfolio">Current portfolio</option>'
export_options += '<option value="Latest worksheet">Latest worksheet</option>'
return table.__html__(), export_options
#endregion aim
#region tests
def TestDefaults(verbose):
    """Run the self-test suite for the defaults/portfolio API.

    Everything printed while the tests run is captured; the original defaults
    (except 'username') are restored afterwards via the reset loop.

    Args:
        verbose: when truthy, per-test progress lines are written to the
            captured output stream.

    Returns:
        dict with keys 'status' (bool: all tests passed), 'total', 'pass',
        'fails' and 'output' (the captured text).
    """
    old_stdout = sys.stdout
    print_out = StringIO()
    sys.stdout = print_out
    count = 0
    fails = 0
    total_tests = 23
    try:
        defaults, types = GetDefaults(False)
        if defaults == {}:
            # No defaults on disk yet: create them so the tests have data.
            result = ResetDefaults(verbose)
            if result:
                defaults, types = GetDefaults(False)
        if verbose:
            print("***")
            print("\tRunning tests will preserve your original defaults (if they exist)")
            print("***\n")

        def run_test(label, action, passed=bool):
            # Print the numbered label, run the action, and tally the outcome.
            # A failed test does not advance `count`, which reproduces the
            # original numbering behaviour.
            nonlocal count, fails
            if verbose:
                print("Test #{0} - {1}".format(count + 1, label))
            result = action()
            if passed(result):
                if verbose:
                    print("\tpass.")
                count += 1
            else:
                if verbose:
                    print("\tfail.")
                fails += 1
            return result

        run_test("UpdateDefaultItem('folder name', 'Test Folder', False)",
                 lambda: UpdateDefaultItem("folder name", "Test Folder", verbose))
        run_test("Company('AAPL', verbose)",
                 lambda: Company("AAPL", verbose))
        run_test("MarketToTime('06:15', verbose)",
                 lambda: MarketToTime("06:15", "US/Eastern", verbose),
                 lambda r: r == "05:15AM")
        run_test("UpdateDefaultItem('tradier key', 'TEST', verbose)",
                 lambda: UpdateDefaultItem("tradier key", "TEST", verbose))
        run_test("UpdateDefaultItem('IEX key', 'TEST', verbose)",
                 lambda: UpdateDefaultItem("IEX key", "TEST", verbose))
        run_test("UpdateDefaultItem('poll minutes', 10, False)",
                 lambda: UpdateDefaultItem('poll minutes', 10, verbose))
        run_test("UpdateDefaultItem('test root', 'test/', False)",
                 lambda: UpdateDefaultItem("test root", "test/", verbose))
        run_test("UpdateDefaultItem('open', '8:30AM', False)",
                 lambda: UpdateDefaultItem("open", "8:30AM", verbose))
        run_test("UpdateDefaultItem('close', '15:00', False)",
                 lambda: UpdateDefaultItem("close", "15:00", verbose))
        run_test("Add('AAPL', verbose)",
                 lambda: Add("AAPL", verbose))
        run_test("QuoteTradier('AAPL', verbose)",
                 lambda: QuoteTradier("AAPL", verbose),
                 lambda r: r[0]['Error Message'] == "Invalid Access Token")
        quote = {'price': 50.55, 'quote': "test"}
        run_test("Price('AAPL', quote, verbose)",
                 lambda: Price("AAPL", quote, verbose))

        # GetFolder has bespoke pass/fail logic (it always echoes the folder
        # and inspects the AAPL entry), so it is checked inline.
        if verbose:
            print("Test #{0} - GetFolder(verbose)".format(count + 1))
        result = GetFolder(verbose)
        print(result)
        if result != []:
            for item in result:
                if item['symbol'] == "AAPL":
                    if item['price'] == 50.55 and item['quote'] == "test":
                        if verbose:
                            print("\tpass.")
                        count += 1
                        break
                    else:
                        if verbose:
                            print("\tfail.")
                        fails += 1
                        break
        else:
            # An empty folder is also acceptable here.
            if verbose:
                print("\tpass.")
            count += 1

        run_test("Holiday(verbose)",
                 lambda: Holiday(verbose),
                 lambda r: "Error Message" in r or 'status' in r)

        # Restore every original default (except the username) that the
        # tests above may have overwritten.
        # NOTE: bind k/v as lambda defaults to avoid the late-binding trap.
        for k, v in defaults.items():
            if k != "username":
                run_test("resets {0} back".format(k),
                         lambda k=k, v=v: UpdateDefaultItem(k, v, verbose))

        if fails == 0 and count == total_tests:
            print("ran {0} tests, all pass".format(total_tests))
            testResults = True
        else:
            print("test count expected {0} passes, received {1}, failures {2}".format(total_tests, count, fails))
            testResults = False
    finally:
        # FIX: always restore stdout, even if a test raises; previously an
        # exception left sys.stdout pointing at the capture buffer.
        sys.stdout = old_stdout
    result_string = print_out.getvalue()
    results = {}
    results['status'] = testResults
    results['total'] = total_tests
    results['pass'] = count
    results['fails'] = fails
    results['output'] = result_string
    return results
def TestFolder(verbose):
old_stdout = sys.stdout
print_out = StringIO()
sys.stdout = print_out
count = 0
fails = 0
total_tests = 13
defaults, types = GetDefaults(verbose)
if (verbose):
print ("Test #{0} - UpdateDefaultItem('folder name', 'Test Folder', verbose)".format(count + 1))
result = UpdateDefaultItem("folder name", "Test Folder", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Add('AAPL', verbose)".format(count + 1))
result = Add( "AAPL", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Balance('$', '5000', verbose)".format(count + 1))
result = Balance( "$", "5000", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetFolderCount(verbose)".format(count + 1))
result = GetFolderCount(verbose)
if (result > 0):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetFolderCash(verbose)".format(count + 1))
result = GetFolderCash(verbose)
if (result == 5000):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Balance('AAPL', '5000', verbose)".format(count + 1))
result = Balance("AAPL", "5000", verbose)
if (result['status']):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Shares('AAPL', '50', verbose)".format(count + 1))
result = Shares("AAPL", "50", verbose)
if (result['status']):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
folder = GetFolder(verbose)
if (verbose):
print ("Test #{0} - GetFolderValue('AAPL', 'price', folder)".format(count + 1))
result = GetFolderValue("AAPL", "price", folder)
if (result >= 0):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetFolderStockValue(verbose)".format(count + 1))
result = GetFolderStockValue(verbose)
if (result >= 0):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Update(verbose)".format(count + 1))
result = Update(verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Remove('AAPL', verbose)".format(count + 1))
result = Remove("AAPL", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
username = getpass.getuser()
if (verbose):
print ("Test #{0} - UpdateDefaultItem('folder name', 'Test Folder', verbose)".format(count + 1))
result = UpdateDefaultItem("folder name", defaults['folder name'], verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
| |
# -*- coding: utf-8 -*-
import os, time, subprocess, threading, ipaddress, paramiko, socket
import io
import re, signal
import changed_password_generator, changed_password_generator_lite
import pandas as pd
import tkinter as tk
from socket import AF_INET, SOCK_DGRAM
from tkinter import ttk, messagebox, TclError, simpledialog, Toplevel, Menu, PhotoImage, filedialog
from tkinter.ttk import Treeview
from tkinter import font as tkfont
from pathlib import Path
from threading import Thread
class SampleApp(tk.Tk):
    """Root window: builds every page frame once and raises the requested one.

    Pages are stacked in a single grid cell; `show_frame` raises the chosen
    page and installs its menu bar (if the page defines one).
    """
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # Shared fonts and ttk styles used by the individual pages.
        self.title_font = tkfont.Font(family = "Helvetica", size = 26, weight = "bold", slant = "italic")
        self.subtitle_font = tkfont.Font(size = 17)
        self.start_page_button_font = tkfont.Font(size = 20)
        self.drone_control_button_font = tkfont.Font(size = 20)
        self.button_font = tkfont.Font(size = 12)
        self.label_font = tkfont.Font(size = 15)
        self.info_font = tkfont.Font(size = 12)
        self.progressbar_color = ttk.Style()
        self.progressbar_color.configure("green.Horizontal.TProgressbar", background='#07f523')
        self.treeview_style = ttk.Style()
        self.treeview_style.configure("font.Treeview", font=(None, 11))
        # the container is where we'll stack a bunch of frames
        # on top of each other, then the one we want visible
        # will be raised above the others
        container = tk.Frame(self)
        container.pack(side = "top", fill = "both", expand = True)
        container.grid_rowconfigure(0, weight = 1)
        container.grid_columnconfigure(0, weight = 1)
        self.frames = {}
        for F in (StartPage, SelectInterface, RFLocationSelect, APDisplay, GetSelectedAPClientINFO, WifiAttack, RemoteServerConnect, DroneControl, FindHackrfDevice):
            page_name = F.__name__
            frame = F(parent = container, controller = self)
            self.frames[page_name] = frame
            # put all of the pages in the same location;
            # the one on the top of the stacking order
            # will be the one that is visible.
            frame.grid(row = 0, column = 0, sticky = "nsew")
        self.show_frame("StartPage")
    def show_frame(self, page_name):
        '''Show a frame for the given page name'''
        frame = self.frames[page_name]
        frame.event_generate("<<ShowFrame>>")
        frame.tkraise()
        frame.config(background="white")
        # Try to install the page's menu bar; pages without a menubar()
        # method simply keep the current menu.
        try:
            menubar = frame.menubar(self)
            self.configure(menu = menubar)
        except (AttributeError, TclError):
            # FIX: was a bare `except:` that swallowed every error (including
            # KeyboardInterrupt/SystemExit); only "page has no menubar" and
            # Tk configuration errors are expected here.
            pass
class StartPage(tk.Frame):
    """Landing page: choose a service and authenticate with the sudo password.

    Side effects: sets the module-level globals `current_path` (directory of
    this script, used for icons and logs) and `sudo_password`.
    """
    def __init__(self, parent, controller):
        global current_path
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # Only prompt for the password once per session.
        self.askstring_runtime_counter = 0
        get_current_path = Path(__file__).parent.absolute()
        current_path = str(get_current_path)
        title_label = tk.Label(self, background = "white", text = "Welcome", font = controller.title_font)
        title_label.pack(side = "top", fill = "x", pady = 10)
        subtitle_label = tk.Label(self, background = "white", text = "Please select service", font = controller.subtitle_font)
        subtitle_label.pack()
        try:
            self.wifi_base_drone_button_icon = tk.PhotoImage(file = current_path + "/data/gui_img/wifi_icon.png")
            self.fake_gps_button_icon = tk.PhotoImage(file = current_path + "/data/gui_img/rf_icon.png")
            wifi_base_drone_button = tk.Button(self, background = "white", text = "Wi-Fi base drone", font = controller.start_page_button_font, image = self.wifi_base_drone_button_icon, compound = "top", width = 100,
                command = lambda: self.sudo_password_input("wifi_base"))
            wifi_base_drone_button.pack(side = "left", fill = "both", padx = 10, pady = 5, expand = True)
            # FIX: the service key must be "rf_base" — the value checked in
            # sudo_password_input(); it had been mangled to a placeholder,
            # leaving the Fake GPS button non-functional.
            fake_gps_button = tk.Button(self, background = "white", text = "Fake GPS", font = controller.start_page_button_font, image = self.fake_gps_button_icon, compound = "top", width = 100,
                command = lambda: self.sudo_password_input("rf_base"))
            fake_gps_button.pack(side = "right", fill = "both", padx = 10, pady = 5, expand = True)
        except TclError: #If icon not found
            wifi_base_drone_button = tk.Button(self, background = "white", text = "Wi-Fi base drone", font = controller.start_page_button_font, width = 20,
                command = lambda: self.sudo_password_input("wifi_base"))
            wifi_base_drone_button.pack(side = "left", fill = "both", padx = 10, pady = 5, expand = True)
            # FIX: same placeholder mangling as above — must be "rf_base".
            fake_gps_button = tk.Button(self, background = "white", text = "Fake GPS", font = controller.start_page_button_font, width = 20,
                command = lambda: self.sudo_password_input("rf_base"))
            fake_gps_button.pack(side = "right", fill = "both", padx = 10, pady = 5, expand = True)
    def menubar(self, tool):
        """Return this page's menu bar (installed by SampleApp.show_frame)."""
        menubar = tk.Menu(tool)
        option_tool = tk.Menu(menubar, tearoff = 0)
        option_tool.add_command(label = "Wi-Fi base drone", command = lambda: self.sudo_password_input("wifi_base"))
        option_tool.add_command(label = "RF base drone", command = lambda: self.sudo_password_input("rf_base"))
        option_tool.add_separator()
        option_tool.add_command(label = "Exit", command = lambda: quit())
        menubar.add_cascade(label = "Option", menu = option_tool)
        help_tool = tk.Menu(menubar, tearoff = 0)
        help_tool.add_command(label = "Page guide", command = lambda: messagebox.showinfo("Page Guide",
            "Thank you for using this programe.\nTo start, please select one option on the page.\n\nWi-Fi base drone: Exploit Wi-Fi attack to get the drone control rights.\n\nRF base drone: Using fake GPS signal to hijack the drone."))
        help_tool.add_command(label = "About", command = lambda: messagebox.showinfo("Drone Hacking Tool",
            "Code name: <NAME>ion\nVersion: 1.1.2.111\n\nGroup member:\n<NAME>\n<NAME>\n<NAME>"))
        menubar.add_cascade(label = "Help", menu = help_tool)
        return menubar
    def sudo_password_input(self, user_selected_service):
        """Prompt for the sudo password (once), validate it via `sudo -S`,
        then open the page for the selected service.

        Args:
            user_selected_service: "wifi_base" or "rf_base".
        """
        global sudo_password
        if self.askstring_runtime_counter < 1:
            sudo_password = simpledialog.askstring("Authentication Required", "Authentication is required to run this program\nPassword:", show = "*")
            self.askstring_runtime_counter = self.askstring_runtime_counter + 1
        if sudo_password == "":
            if messagebox.showerror("Error", "You must type in password."):
                quit()
        elif sudo_password is None:
            # FIX: identity comparison for None (was `== None`). None means
            # the user pressed Cancel.
            quit()
        elif sudo_password != "":
            # SECURITY: the password is interpolated into a shell command and
            # echoed to `sudo -S`, so it is exposed to shell injection and the
            # process list. Prefer subprocess.run([...], input=..., shell=False).
            sudo_password_validation = "echo " + sudo_password + " | sudo -S ls" #Root password validation
            get_sudo_password_validation_states = subprocess.Popen(sudo_password_validation, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True, universal_newlines = True).stdout
            sudo_password_validation_states = get_sudo_password_validation_states.read().splitlines()
            sudo_password_validation_states_convert = str(sudo_password_validation_states) #Convert to string
            if "incorrect password attempt" in sudo_password_validation_states_convert:
                if messagebox.showerror("Authentication failed", "Invalid password, please try again."):
                    quit()
            else:
                if user_selected_service == "wifi_base":
                    self.controller.show_frame("SelectInterface")
                elif user_selected_service == "rf_base":
                    self.controller.show_frame("FindHackrfDevice")
class SelectInterface(tk.Frame):
    def __init__(self, parent, controller):
        # Widgets are built lazily on every <<ShowFrame>> event (see
        # select_interface_gui) rather than up front.
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.bind("<<ShowFrame>>", self.select_interface_gui)
    def select_interface_gui(self, event):
        """Build the adapter-selection widgets; runs on each <<ShowFrame>> event."""
        self.title_label = tk.Label(self, background = "white", text = "Select Wi-Fi adapter:", font = self.controller.title_font)
        self.title_label.pack(side = "top", fill = "x", pady = 10)
        try:
            # Image header; falls back to a plain text label below if the
            # icon file cannot be loaded.
            self.label_wifi_adapter_label_image = tk.PhotoImage(file = current_path + "/data/gui_img/wifi_adapter.png")
            self.wifi_adapter_label = tk.Label(self, background = "white", image = self.label_wifi_adapter_label_image)
            self.wifi_adapter_label.pack(side = "top", pady = 10)
        except:
            self.wifi_adapter_label = tk.Label(self, background = "white", text = "IEEE 802.11 adapter", font = self.controller.label_font)
            self.wifi_adapter_label.pack(side = "top", pady = 10)
        # Single-select list of detected wireless interfaces.
        self.adapter_listBox = tk.Listbox(self, font = self.controller.label_font, selectmode = tk.SINGLE)
        self.adapter_listBox.pack()
        try:
            # Back/Next buttons with icons; plain-text fallback below.
            self.back_button_icon = tk.PhotoImage(file = current_path + "/data/gui_img/back_icon.png")
            self.next_button_icon = tk.PhotoImage(file = current_path + "/data/gui_img/next_icon.png")
            self.back_button = tk.Button(self, background = "white", text="Back", font = self.controller.button_font, image = self.back_button_icon, compound = "left",
                command = lambda: [self.destroy_select_interface_gui(), self.controller.show_frame("StartPage")])
            self.back_button.pack(side = "left", anchor = "sw")
            self.next_button = tk.Button(self, background = "white", text="Next", font = self.controller.button_font, image = self.next_button_icon, compound = "right",
                command = self.check_selection)
            self.next_button.pack(side = "right", anchor = "se")
        except: #If icon not found
            self.back_button = tk.Button(self, background = "white", text="Back", font = self.controller.button_font,
                command = lambda: [self.destroy_select_interface_gui(), self.controller.show_frame("StartPage")])
            self.back_button.pack(side = "left", anchor = "sw")
            self.next_button = tk.Button(self, background = "white", text="Next", font = self.controller.button_font,
                command = self.check_selection)
            self.next_button.pack(side = "right", anchor = "se")
        # Kick off the two-phase interface scan (see load_interface).
        self.load_interface()
    def menubar(self, tool):
        """Return this page's menu bar (installed by SampleApp.show_frame)."""
        menubar = tk.Menu(tool)
        option_tool = tk.Menu(menubar, tearoff = 0)
        option_tool.add_command(label = "Back", command = lambda: [self.destroy_select_interface_gui(), self.controller.show_frame("StartPage")])
        option_tool.add_separator()
        option_tool.add_command(label = "Exit", command = lambda: quit())
        menubar.add_cascade(label = "Option", menu = option_tool)
        help_tool = tk.Menu(menubar, tearoff = 0)
        help_tool.add_command(label = "Page guide", command = lambda: messagebox.showinfo("Page Guide",
            "Please ready your Wi-Fi adapter, and make sure your adapter supports 'monitor' mode.\n\nIf you are connected to your Wi-Fi adapter correctly, you can see the adapter name on the screen."))
        help_tool.add_command(label = "About", command = lambda: messagebox.showinfo("Drone Hacking Tool",
            "Code name: <NAME>\nVersion: 1.1.2.111\n\nGroup member:\n<NAME>\n<NAME>\n<NAME>"))
        menubar.add_cascade(label = "Help", menu = help_tool)
        return menubar
def load_interface(self, runtime_counter = 0):
if runtime_counter < 1:
adapter_info = subprocess.Popen("iw dev 2>&1 | grep Interface | awk '{print $2}'", stdout = subprocess.PIPE, shell = True, universal_newlines = True).stdout
self.adapter_info_list = adapter_info.read().splitlines()
self.adapter_listBox.delete(0, "end")
app.after(10, lambda: self.load_interface(runtime_counter + 1)) #Wait 10 ms for loop
else:
if not self.adapter_info_list: #If no Wi-Fi adapter found
selected_interface_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp
self.check_log_file = Path(current_path + "/data/hack_drone_log.csv")
if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist
target_BSSID_log = [""]
channel_log = [""]
privacy_log = [""]
password_log = [""]
manufacturer_log = [""]
client_BSSID_log = [""]
selected_ap_timestamp_log = [selected_interface_timestamp]
states_log = ["Error: No interface found"]
dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":<PASSWORD>, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log,"timestamp":selected_ap_timestamp_log, "states":states_log})
dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ',', mode = "a", header = False) #Write log data to "drone_attack_log.csv"
if messagebox.showerror("Error", "No interface found."):
self.destroy_select_interface_gui()
self.controller.show_frame("StartPage")
else:
for values in self.adapter_info_list:
self.adapter_listBox.insert("end", values)
def check_selection(self):
global selected_interface
try: #If user selected Wi-Fi interface
self.index_adapter_listBox = int(self.adapter_listBox.curselection()[0])
get_user_selected_interface = [self.adapter_listBox.get(values) for values in self.adapter_listBox.curselection()]
selected_interface_convert = str(get_user_selected_interface) #Convert to string
selected_interface_convert_strip = selected_interface_convert.strip("[(,)]") #Remove characters "[(,)]"
selected_interface = eval(selected_interface_convert_strip) #Remove characters "''"
message_user_select = ("Adapter " + selected_interface + " selected.")
if messagebox.askokcancel("Selected Wi-Fi Interface", message_user_select):
selected_interface_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp
self.check_log_file = Path(current_path + "/data/hack_drone_log.csv")
if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist
target_BSSID_log = [""]
channel_log = [""]
privacy_log = [""]
password_log = [""]
manufacturer_log = [""]
client_BSSID_log = [""]
selected_ap_timestamp_log = [selected_interface_timestamp]
states_log = ["Adapter " + selected_interface + " | |
from __future__ import absolute_import, division, print_function
import argparse
import logging
import sys
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
logger = logging.getLogger('causalml')
def smd(feature, treatment):
    """Calculate the standard mean difference (SMD) of a feature between the
    treatment and control groups.

    The definition is available at
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3144483/#s11title

    Args:
        feature (pandas.Series): a column of a feature to calculate SMD for
        treatment (pandas.Series): a column that indicate whether a row is in
            the treatment group or not

    Returns:
        (float): The SMD of the feature
    """
    treated = feature[treatment == 1]
    control = feature[treatment == 0]
    # Mean difference scaled by the pooled standard deviation.
    pooled_var = .5 * (treated.var() + control.var())
    return (treated.mean() - control.mean()) / np.sqrt(pooled_var)
def create_table_one(data, treatment_col, features):
    """Report balance in input features between the treatment and control groups.

    References:
        R's tableone at CRAN: https://github.com/kaz-yos/tableone
        Python's tableone at PyPi: https://github.com/tompollard/tableone

    Args:
        data (pandas.DataFrame): total or matched sample data
        treatment_col (str): the column name for the treatment
        features (list of str): the column names of features

    Returns:
        (pandas.DataFrame): A table with the means and standard deviations in
            the treatment and control groups, and the SMD between two groups
            for the features.
    """
    def _smd(feature):
        # Standard mean difference between treatment and control rows
        # (same formula as the module-level smd(), inlined here).
        treated = feature[data[treatment_col] == 1]
        control = feature[data[treatment_col] == 0]
        return (treated.mean() - control.mean()) / np.sqrt(
            .5 * (treated.var() + control.var()))

    # One "mean (std)" cell per feature per group.
    summary = pd.pivot_table(
        data[features + [treatment_col]],
        columns=treatment_col,
        aggfunc=[lambda x: '{:.2f} ({:.2f})'.format(x.mean(), x.std())],
    )
    summary.columns = summary.columns.droplevel(level=0)
    summary['SMD'] = data[features].apply(_smd).round(4)

    # Header row holding the per-group sample sizes.
    size_row = pd.pivot_table(
        data[[features[0], treatment_col]],
        columns=treatment_col,
        aggfunc=['count'],
    )
    size_row.columns = size_row.columns.droplevel(level=0)
    size_row['SMD'] = ''
    size_row.index = ['n']

    table = pd.concat([size_row, summary], axis=0)
    table.columns.name = ''
    table.columns = ['Control', 'Treatment', 'SMD']
    table.index.name = 'Variable'
    return table
class NearestNeighborMatch(object):
    """
    Propensity score matching based on the nearest neighbor algorithm.
    Attributes:
        caliper (float): threshold to be considered as a match.
        replace (bool): whether to match with replacement or not
        ratio (int): ratio of control / treatment to be matched. used only if
            replace=True.
        shuffle (bool): whether to shuffle the treatment group data before
            matching
        random_state (numpy.random.RandomState or int): RandomState or an int
            seed
    """
    def __init__(self, caliper=.2, replace=False, ratio=1, shuffle=True,
                 random_state=None):
        """Initialize a propensity score matching model.
        Args:
            caliper (float): threshold to be considered as a match.
            replace (bool): whether to match with replacement or not
            shuffle (bool): whether to shuffle the treatment group data before
                matching or not
            random_state (numpy.random.RandomState or int): RandomState or an
                int seed
        """
        self.caliper = caliper
        self.replace = replace
        self.ratio = ratio
        self.shuffle = shuffle
        # Normalize int/None seeds into a RandomState instance up front.
        self.random_state = check_random_state(random_state)
    def match(self, data, treatment_col, score_cols):
        """Find matches from the control group by matching on specified columns
        (propensity preferred).
        Args:
            data (pandas.DataFrame): total input data
            treatment_col (str): the column name for the treatment
            score_cols (list): list of column names for matching (propensity
                column should be included)
        Returns:
            (pandas.DataFrame): The subset of data consisting of matched
                treatment and control group data.
        """
        assert type(score_cols) == list, 'score_cols must be a list'
        treatment = data.loc[data[treatment_col] == 1, score_cols]
        control = data.loc[data[treatment_col] == 0, score_cols]
        # Caliper expressed in units of the score's standard deviation.
        sdcal = self.caliper * np.std(data[score_cols].values)
        if self.replace:
            # Matching with replacement: standardize the score columns so
            # Euclidean distance is comparable across them, then take the
            # `ratio` nearest control rows for every treatment row.
            scaler = StandardScaler()
            scaler.fit(data[score_cols])
            treatment_scaled = pd.DataFrame(scaler.transform(treatment),
                                            index=treatment.index)
            control_scaled = pd.DataFrame(scaler.transform(control),
                                          index=control.index)
            # SD is the same as caliper because we use a StandardScaler above
            sdcal = self.caliper
            matching_model = NearestNeighbors(n_neighbors=self.ratio)
            matching_model.fit(control_scaled)
            distances, indices = matching_model.kneighbors(treatment_scaled)
            # distances and indices are (n_obs, self.ratio) matrices.
            # To index easily, reshape distances, indices and treatment into
            # the (n_obs * self.ratio, 1) matrices and data frame.
            distances = distances.T.flatten()
            indices = indices.T.flatten()
            treatment_scaled = pd.concat([treatment_scaled] * self.ratio,
                                         axis=0)
            # Keep only the pairs that fall within the caliper.
            cond = distances < sdcal
            # Deduplicate the indices of the treatment group
            t_idx_matched = list(set(
                treatment_scaled.loc[cond].index.tolist()
            ))
            # XXX: Should we deduplicate the indices of the control group too?
            c_idx_matched = control_scaled.iloc[indices[cond]].index.tolist()
        else:
            assert len(score_cols) == 1, (
                'Matching on multiple columns is only supported using the '
                'replacement method (if matching on multiple columns, set '
                'replace=True).'
            )
            # unpack score_cols for the single-variable matching case
            score_col = score_cols[0]
            if self.shuffle:
                # Randomize the matching order so results are not biased by
                # the row order of the input.
                t_indices = self.random_state.permutation(treatment.index)
            else:
                t_indices = treatment.index
            t_idx_matched = []
            c_idx_matched = []
            control['unmatched'] = True
            # Greedy 1:1 matching without replacement: each treatment row
            # takes the closest still-unmatched control row, provided it is
            # within the caliper.
            for t_idx in t_indices:
                dist = np.abs(control.loc[control.unmatched, score_col]
                              - treatment.loc[t_idx, score_col])
                c_idx_min = dist.idxmin()
                if dist[c_idx_min] <= sdcal:
                    t_idx_matched.append(t_idx)
                    c_idx_matched.append(c_idx_min)
                    # Mark the control row as used.
                    control.loc[c_idx_min, 'unmatched'] = False
        return data.loc[np.concatenate([np.array(t_idx_matched),
                                        np.array(c_idx_matched)])]
    def match_by_group(self, data, treatment_col, score_cols, groupby_col):
        """Find matches from the control group stratified by groupby_col, by
        matching on specified columns (propensity preferred).
        Args:
            data (pandas.DataFrame): total sample data
            treatment_col (str): the column name for the treatment
            score_cols (list): list of column names for matching (propensity
                column should be included)
            groupby_col (str): the column name to be used for stratification
        Returns:
            (pandas.DataFrame): The subset of data consisting of matched
                treatment and control group data.
        """
        matched = data.groupby(groupby_col).apply(
            lambda x: self.match(data=x, treatment_col=treatment_col,
                                 score_cols=score_cols)
        )
        return matched.reset_index(level=0, drop=True)
class MatchOptimizer(object):
    def __init__(self, treatment_col='is_treatment', ps_col='pihat',
                 user_col=None, matching_covariates=['pihat'], max_smd=0.1,
                 max_deviation=0.1, caliper_range=(0.01, 0.5),
                 max_pihat_range=(0.95, 0.999), max_iter_per_param=5,
                 min_users_per_group=1000, smd_cols=['pihat'],
                 dev_cols_transformations={'pihat': np.mean},
                 dev_factor=1., verbose=True):
        """Finds the set of parameters that gives the best matching result.
        Score = (number of features with SMD > max_smd)
                + (sum of deviations for important variables
                   * deviation factor)
        The logic behind the scoring is that we are most concerned with
        minimizing the number of features where SMD is lower than a certain
        threshold (max_smd). However, we would also like the matched dataset
        not deviate too much from the original dataset, in terms of key
        variable(s), so that we still retain a similar userbase.
        Args:
            - treatment_col (str): name of the treatment column
            - ps_col (str): name of the propensity score column
            - user_col (str): name of the user-id column; when None, row
              counts fall back to the frame index
            - matching_covariates (list): columns to balance between groups
            - max_smd (float): maximum acceptable SMD
            - max_deviation (float): maximum acceptable deviation for
              important variables
            - caliper_range (tuple): low and high bounds for caliper search
              range
            - max_pihat_range (tuple): low and high bounds for max pihat
              search range
            - max_iter_per_param (int): maximum number of search values per
              parameters
            - min_users_per_group (int): minimum number of users per group in
              matched set
            - smd_cols (list): score is more sensitive to these features
              exceeding max_smd
            - dev_factor (float): importance weight factor for dev_cols
              (e.g. dev_factor=1 means a 10% deviation leads to penalty of 1
              in score)
            - dev_cols_transformations (dict): dict of transformations to be
              made on dev_cols
            - verbose (bool): boolean flag for printing statements
        Returns:
            The best matched dataset (pd.DataFrame)
        """
        # NOTE(review): matching_covariates, smd_cols and
        # dev_cols_transformations are mutable default arguments shared
        # across instances — safe only as long as they are never mutated.
        self.treatment_col = treatment_col
        self.ps_col = ps_col
        self.user_col = user_col
        self.matching_covariates = matching_covariates
        self.max_smd = max_smd
        self.max_deviation = max_deviation
        # Evenly spaced search grids for the two tunable parameters.
        self.caliper_range = np.linspace(*caliper_range,
                                         num=max_iter_per_param)
        self.max_pihat_range = np.linspace(*max_pihat_range,
                                           num=max_iter_per_param)
        self.max_iter_per_param = max_iter_per_param
        self.min_users_per_group = min_users_per_group
        self.smd_cols = smd_cols
        self.dev_factor = dev_factor
        self.dev_cols_transformations = dev_cols_transformations
        self.best_params = {}
        self.best_score = 1e7  # ideal score is 0
        self.verbose = verbose
        # Set by check_table_one() when a match satisfies every criterion.
        self.pass_all = False
def single_match(self, score_cols, pihat_threshold, caliper):
matcher = NearestNeighborMatch(caliper=caliper, replace=True)
df_matched = matcher.match(
data=self.df[self.df[self.ps_col] < pihat_threshold],
treatment_col=self.treatment_col, score_cols=score_cols
)
return df_matched
def check_table_one(self, tableone, matched, score_cols, pihat_threshold,
                    caliper):
    """Score one matched dataset and update the best-run bookkeeping.

    Computes a penalty score from (a) the number of covariates whose SMD
    exceeds ``max_smd``, (b) extra penalty for ``smd_cols`` over the
    threshold, and (c) weighted deviations of key variables from the
    original dataset. Lower is better; the ideal score is 0. Side effects:
    updates ``self.cols_to_fix``, ``self.best_score``, ``self.best_params``,
    ``self.best_matched`` and ``self.pass_all``.
    """
    # check if better than past runs
    # absolute SMDs for all covariate rows (the 'n' row is a count, not a covariate)
    smd_values = np.abs(tableone[tableone.index != 'n']['SMD'].astype(float))
    num_cols_over_smd = (smd_values >= self.max_smd).sum()
    # remember the worst offenders, largest SMD first
    self.cols_to_fix = smd_values[smd_values >= self.max_smd].sort_values(ascending=False).index.values
    # smallest group size: by row count when no user column, else by user count
    if self.user_col is None:
        num_users_per_group = matched.reset_index().groupby(self.treatment_col)['index'].count().min()
    else:
        num_users_per_group = matched.groupby(self.treatment_col)[self.user_col].count().min()
    # relative deviation of each key variable's treated-group mean from the
    # pre-matching statistic captured in search_best_match
    deviations = [np.abs(self.original_stats[col] / matched[matched[self.treatment_col] == 1][col].mean() - 1)
                  for col in self.dev_cols_transformations.keys()]
    score = num_cols_over_smd
    # smd_cols are counted twice: once above, once here (extra sensitivity)
    score += len([col for col in self.smd_cols if smd_values.loc[col] >= self.max_smd])
    # a 10% deviation contributes dev_factor to the score
    score += np.sum([dev*10*self.dev_factor for dev in deviations])
    # check if can be considered as best score
    if score < self.best_score and num_users_per_group > self.min_users_per_group:
        self.best_score = score
        self.best_params = {'score_cols': score_cols.copy(), 'pihat': pihat_threshold, 'caliper': caliper}
        self.best_matched = matched.copy()
    if self.verbose:
        logger.info('\tScore: {:.03f} (Best Score: {:.03f})\n'.format(score, self.best_score))
    # check if passes all criteria
    self.pass_all = ((num_users_per_group > self.min_users_per_group) and (num_cols_over_smd == 0) and
                     all([dev < self.max_deviation for dev in deviations]))
def match_and_check(self, score_cols, pihat_threshold, caliper):
    """Run a single matching pass and score the resulting balance table."""
    if self.verbose:
        logger.info('Preparing match for: caliper={:.03f}, '
                    'pihat_threshold={:.03f}, '
                    'score_cols={}'.format(caliper, pihat_threshold, score_cols))
    matched = self.single_match(score_cols=score_cols,
                                pihat_threshold=pihat_threshold,
                                caliper=caliper)
    balance_table = create_table_one(matched, self.treatment_col,
                                     self.matching_covariates)
    self.check_table_one(balance_table, matched, score_cols,
                         pihat_threshold, caliper)
def search_best_match(self, df):
self.df = df
self.original_stats = {}
for col, trans in self.dev_cols_transformations.items():
self.original_stats[col] = trans(self.df[self.df[self.treatment_col] == 1][col])
# search best max pihat
if self.verbose:
logger.info('SEARCHING FOR BEST PIHAT')
score_cols = [self.ps_col]
caliper = self.caliper_range[-1]
for pihat_threshold in self.max_pihat_range:
self.match_and_check(score_cols, pihat_threshold, caliper)
# search best score_cols
if self.verbose:
logger.info('SEARCHING FOR BEST SCORE_COLS')
pihat_threshold = | |
<reponame>googleinterns/cl_analysis
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from collections import defaultdict
from datetime import datetime, timedelta
from copy import deepcopy
import os
from model.constants import *
class CLData:
    """Container for one CL (change list / pull request) worth of data.

    Attributes:
        pr_level_features: list of pull-request-level feature values, or
            None until populated.
        file_level_features: dict mapping file name -> list of file-level
            feature values.
        reverted: True when this CL was later reverted.
    """
    def __init__(self):
        """Create an empty CL record with no features and not reverted."""
        self.reverted = False
        self.file_level_features = {}
        self.pr_level_features = None
class DataLoader:
"""
This class helps load the whole dataset either from the local extracted
feature csv file or from the local saved txt file.
Attributes:
repos: A list of repo names.
pr_columns: A list of pull request level feature names.
file_columns: A list of file level feature names.
"""
def __init__(self, repos):
    """
    Init DataLoader.

    Args:
        repos: A list of repo names to load data for.
    """
    self.repos = repos
    # column orders must mirror what save_data_to_txt writes out
    self.pr_columns = COMMON_PR_LEVEL_FEATURES + EXTRA_PR_LEVEL_FEATURES
    self.file_columns = COMMON_FILE_LEVEL_FEATURES + \
        EXTRA_FILE_LEVEL_FEATURES
@staticmethod
def _count_check_run_passed(lst):
"""
Count the total number of passed check runs.
Args:
lst: A list of 'passed', 'failed', 'none'
Returns:
A integer indicating the total number of passed check runs.
"""
if pd.isna(lst):
return 0
num_passed = 0
for check_run_result in eval(lst):
if check_run_result == 'passed':
num_passed += 1
return num_passed
@staticmethod
def _count_check_run_failed(lst):
"""
Count the total number of failed check runs.
Args:
lst: A list of 'passed', 'failed', 'none'
Returns:
A integer indicating the total number of failed check runs.
"""
if pd.isna(lst):
return 0
num_failed = 0
for check_run_result in eval(lst):
if check_run_result == 'failed':
num_failed += 1
return num_failed
def _get_pr_level_signals(self, repo):
    """
    Load the pull request level signals for input repo.

    Args:
        repo: A str holds the repo to load from.

    Returns:
        A pandas dataframe holds pull request level signals, with two
        derived columns ('check run passed'/'check run failed') computed
        from the serialized 'check run results' column.
    """
    pr_level_signals = pd.read_csv(
        '../data/%s_pull_requests_signals.csv' % repo)
    pr_level_signals['check run passed'] = pr_level_signals[
        'check run results'].apply(self._count_check_run_passed)
    pr_level_signals['check run failed'] = pr_level_signals[
        'check run results'].apply(self._count_check_run_failed)
    return pr_level_signals
@staticmethod
def _get_file_level_signals(repo):
    """
    Load the file level signals for input repo.

    Args:
        repo: A str holds the repo to load from.

    Returns:
        A pandas dataframe of the repo's file level signals with rows
        lacking a file name removed.
    """
    signals = pd.read_csv('../data/%s_file_level_signals.csv' % repo)
    has_name = signals['file name'].notna()
    return signals[has_name]
@staticmethod
def _get_file_level_signals_dict(dates, repo):
    """
    Load per-date file level feature frames for a repo.

    Args:
        dates: A list of datetime dates.
        repo: A str holds the repo name.

    Returns:
        A defaultdict mapping 'YYYY_MM_DD' strings to dataframes.
    """
    signals_by_date = defaultdict(pd.DataFrame)
    for day in dates:
        key = day.strftime(format="%Y_%m_%d")
        signals_by_date[key] = pd.read_csv(
            '../data/%s_%s_features.csv' % (repo, key))
    return signals_by_date
@staticmethod
def get_dates(file_level_signals):
"""
Compute the date range of given file level signals.
Args:
file_level_signals: A dataframe holds the file level signals.
Returns:
A list of dates.
"""
min_date = file_level_signals['pull request closed time'].min()
max_date = file_level_signals['pull request closed time'].max()
start_date = datetime.fromisoformat(min_date[:-1]) \
+ timedelta(days=1)
end_date = datetime.fromisoformat(max_date[:-1])
dates = pd.date_range(start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d")) \
.to_pydatetime().tolist()
return dates
@staticmethod
def _get_file_names(files_changes):
"""
Get the file names from the files changes list.
Args:
files_changes: A list of file changes.
Returns:
A list of file names.
"""
file_names = set()
for t in eval(files_changes):
file_name, _, _, _ = t
file_names.add(file_name)
return file_names
@staticmethod
def _get_num_reverted_file(file_names, file_level_signals):
"""
Get the num of files that are involved in CL rollbacks.
Args:
file_names: A list of file names
file_level_signals: A dataframe of file level signals
Returns:
A integer of the num of files that are involved in CL rollbacks.
"""
num_reverted_file = 0
for file_name in file_names:
selected_df = file_level_signals[
file_level_signals['file name'] == file_name]
if selected_df.empty:
continue
if selected_df['reverted pull request id count'].values[0] > 0:
num_reverted_file += 1
return num_reverted_file
@staticmethod
def _get_file_data(pr_id, file_names, file_level_signals, cl_data_dict):
    """
    Fill in the file level features of the cl_data_dict and compute the
    number of old files.

    A file is "old" when it already appears in the day's file level
    signals; its feature row plus a derived reverted-CL rate are stored
    in cl_data_dict[pr_id].file_level_features.

    Args:
        pr_id: An integer pull request id.
        file_names: A collection of file names changed in the PR.
        file_level_signals: A dataframe holding the file level signals.
        cl_data_dict: A dict of CLData records to fill in (mutated).

    Returns:
        An integer indicating the number of old files.
    """
    num_old_files = 0
    for i in range(len(file_level_signals)):
        file_signals = file_level_signals.iloc[i]
        file_name = file_signals['file name']
        if file_name in file_names:
            file_data = []
            for feature in COMMON_FILE_LEVEL_FEATURES:
                file_data.append(file_signals[feature])
            # fraction of this file's past PRs that were reverted
            # NOTE(review): yields inf (or raises) when the PR count is 0 —
            # confirm the signals guarantee a positive count.
            reverted_cl_rate = \
                file_signals['reverted pull request id count'] / \
                file_signals['pull request id count']
            file_data.append(reverted_cl_rate)
            cl_data_dict[pr_id].file_level_features[file_name] = file_data
            num_old_files += 1
    return num_old_files
@staticmethod
def _get_pr_data(pr_signals, num_files, num_new_files, num_reverted_file):
    """
    Build the pull request feature vector.

    Args:
        pr_signals: A pandas series of signals for one pull request.
        num_files: An integer number of files in the pull request (> 0).
        num_new_files: An integer number of new files.
        num_reverted_file: An integer number of files previously involved
            in CL rollbacks.

    Returns:
        A list of feature values for the pull request.
    """
    pr_data = [pr_signals[feature] for feature in COMMON_PR_LEVEL_FEATURES]
    pr_data.extend([
        num_new_files,
        num_files,
        num_reverted_file,
        1 if num_reverted_file else 0,  # binary "touches reverted files" flag
        num_reverted_file / num_files,
    ])
    return pr_data
def _get_cl_data_dict(self, pr_level_signals, repo):
    """
    Compute the CL data dict.

    Walks every PR row, marks reverted PRs, and joins each PR with the
    file level feature file for the day it was closed (skipping PRs with
    no changed files or no feature file on disk).

    Args:
        pr_level_signals: A dataframe of pull request level signals.
        repo: A str of the repo name.

    Returns:
        A dict holds the CL data. The keys are the CL ids and the values
        are one CLData object.
    """
    cl_data_dict = defaultdict(CLData)
    for index in range(len(pr_level_signals)):
        pr_signals = pr_level_signals.iloc[index]
        pr_id = pr_signals['pull request id']
        reverted_pr_id = pr_signals['reverted pull request id']
        # a non-zero id means THIS row reverts that other PR
        if reverted_pr_id != 0:
            cl_data_dict[reverted_pr_id].reverted = True
        # 'Z' suffix stripped before ISO parsing
        closed_date = datetime.fromisoformat(
            pr_signals['pull request closed time'][:-1])\
            .strftime(format="%Y_%m_%d")
        files_changes = pr_signals['files changes']
        file_names = self._get_file_names(files_changes)
        num_files = len(file_names)
        if not num_files:
            continue
        file_name = '../data/%s_%s_features.csv' % (repo, closed_date)
        if not os.path.exists(file_name):
            # no feature snapshot for that day — PR gets no features
            continue
        file_level_signals = pd.read_csv(file_name)
        num_reverted_file = self._get_num_reverted_file(file_names,
                                                        file_level_signals)
        num_old_files = self._get_file_data(pr_id, file_names,
                                            file_level_signals,
                                            cl_data_dict)
        num_new_files = num_files - num_old_files
        pr_data = self._get_pr_data(
            pr_signals, num_files, num_new_files, num_reverted_file)
        cl_data_dict[pr_id].pr_level_features = deepcopy(pr_data)
    return cl_data_dict
def load_data(self):
    """
    Load data from all repos.

    PRs that never received pull-request-level features (e.g. because no
    feature file existed for their close date) are skipped.

    Returns:
        A dict holds the CL data of all repos. The keys are the repo names
        and the values are lists of [pr_features, file_features, reverted].
    """
    training_data_dict = defaultdict(list)
    for repo in self.repos:
        print("Adding %s" % repo)
        pr_level_signals = self._get_pr_level_signals(repo)
        cl_data_dict = self._get_cl_data_dict(pr_level_signals, repo)
        for pr_id in cl_data_dict:
            cl_data = cl_data_dict[pr_id]
            pr_features = cl_data.pr_level_features
            if not pr_features:
                continue
            file_features = list(cl_data.file_level_features.values())
            reverted = cl_data.reverted
            training_data_dict[repo].append(
                [pr_features, file_features, reverted])
    return training_data_dict
def save_data_to_txt(self, training_data_dict):
    """
    Save the data of all repos to local txt files.

    Each file starts with the PR column names and file column names
    (one line each) followed by one datum per line.

    Args:
        training_data_dict: A dict mapping repo names to their CL data.

    Returns:
        None
    """
    for repo, repo_data in training_data_dict.items():
        with open('../data/%s_data.txt' % repo, 'w') as file:
            file.write(str(self.pr_columns) + '\n')
            file.write(str(self.file_columns) + '\n')
            for datum in repo_data:
                file.write(str(datum) + '\n')
def load_data_from_txt(self):
    """
    Load the data of all repos from the local txt files.

    Returns:
        load_pr_columns: A list of pull request level feature names.
        load_file_columns: A list of file level feature names.
        load_data_dict: A dict holds the CL data of all repos.
            The keys are the repo names and the values are the CL data.
    """
    # BUG FIX: previously load_pr_columns/load_file_columns were only
    # bound inside the loop, so an empty self.repos raised
    # UnboundLocalError at the return statement.
    load_pr_columns, load_file_columns = [], []
    load_data_dict = {}
    for repo in self.repos:
        with open('../data/%s_data.txt' % repo, 'r') as file:
            # NOTE(review): eval trusts the saved files; they are produced
            # locally by save_data_to_txt, but ast.literal_eval is safer.
            load_pr_columns = eval(file.readline())
            load_file_columns = eval(file.readline())
            load_data_dict[repo] = [eval(line) for line in file]
    return load_pr_columns, load_file_columns, load_data_dict
def | |
import requests
import json
import sys
import numpy as np
from Group import Group
# python3 migrate_users_and_groups.py.py qa 0000000000000000000000000000000000000000000000000000000000000000 qa 0000000000000000000000000000000000000000000000000000000000000000
def get_groups(env, admin_api_key):
    """Return the list of groups for the environment, or None when none exist.

    NOTE(review): when the response carries an ErrorCode other than
    "NotFound", control falls through to the ['Groups'] lookup below,
    which may raise KeyError — confirm whether that is intended.
    """
    api_url = "https://" + env + ".cloudcheckr.com/api/account.json/get_groups_v2"
    r1 = requests.get(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key})
    if ('ErrorCode' in r1.json()):
        if (r1.json()['ErrorCode'] == "NotFound"):
            # print("No Groups")
            return None
    # print(r1.json())
    # print(r1.json()['Groups'])
    # print("Grabbed list of groups\n")
    return r1.json()['Groups']
def check_duplicate_group(env2, admin_api_key2, group_name):
    """Return True if the target environment already has a group named *group_name*.

    You have to do this because the only way to add users to a group is the
    add_user(s) call, which requires a unique group name.

    BUG FIX: the old implementation copied names into an np.full buffer
    whose fixed-width string dtype came from a placeholder template, so any
    group name longer than the template was silently truncated, corrupting
    the duplicate check. A plain membership test has no such limit.
    """
    groups2 = get_groups(env2, admin_api_key2)
    if groups2 is None:
        # no groups yet, so there can't be a duplicate
        return False
    existing_names = [group['Name'] for group in groups2]
    return group_name in existing_names
def create_group(env2, admin_api_key2, group_name):
    """Create *group_name* in the target environment and return its new id.

    Returns None when the name already exists in the target environment or
    the API call fails. All outcomes are logged to stdout.
    """
    # add a validator to make sure there are no groups with two names
    if not (check_duplicate_group(env2, admin_api_key2, group_name)):
        api_url = "https://" + env2 + ".cloudcheckr.com/api/account.json/create_group"
        r3 = requests.post(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key2}, data = json.dumps({"name": group_name}))
        # success is signalled by Message == "OK" plus a group_id payload
        if ("Message" in r3.json()):
            if (r3.json()["Message"] == "OK"):
                print("Created Group: " + group_name + " in " + env2 + " with id: " + r3.json()["group_id"])
                print(r3.json())
                return r3.json()["group_id"]
            else:
                print("Could not create group " + group_name)
                print(r3.json())
        else:
            print("Could not create group " + group_name)
            print(r3.json())
    else:
        print("Duplicate Group Name not Supported. Change group name " + group_name + " of original environment")
    # implicit None on every non-success path
    return None
def create_groups(env1, admin_api_key1, env2, admin_api_key2, groups):
    """
    Gets the list of groups from the original environment and migrates them
    to the new environment. It is limited, so a group name can not be
    duplicated.

    Returns a (n, 3) array of [old_id, new_id, name] rows; the new-id cell
    keeps the zero-UUID placeholder when creation failed.

    BUG FIX: np.full with a str template infers a fixed-width string dtype,
    which silently truncated any id/name longer than the 36-char template;
    dtype=object preserves full values.
    """
    group_links = np.full((np.shape(groups)[0], 3),
                          "00000000-0000-0000-0000-000000000000",
                          dtype=object)
    for i in np.arange(0, np.shape(groups)[0]):
        group_links[i][0] = groups[i]["Id"]
        new_group_id = create_group(env2, admin_api_key2, groups[i]['Name'])
        if new_group_id is not None:
            group_links[i][1] = new_group_id
        group_links[i][2] = groups[i]["Name"]
    print("Finished Creating groups\n")
    return group_links
def get_accounts_in_group(env, admin_api_key, group_id):
    """Return the account (project) names permissioned to a group, or None.

    BUG FIX: the old np.full buffer used a fixed-width string dtype from a
    36-char placeholder, truncating longer account names; dtype=object
    keeps them intact.
    """
    api_url = "https://" + env + ".cloudcheckr.com/api/account.json/get_accounts_by_group?group_id=" + group_id
    r4 = requests.get(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key})
    if ("ErrorCode" in r4.json()):
        print("No accounts permissionsed in this group " + group_id)
        return None
    projects = r4.json()["Projects"]
    return np.array([project["Name"] for project in projects], dtype=object)
def get_list_of_group_acls_for_account(env, admin_api_key, group_id, account_name):
    """Return the ACL ids a group holds on one account, or None on error/absence."""
    api_url = "https://" + env + ".cloudcheckr.com/api/account.json/get_access_control_list_per_group?group_id=" + group_id + "&use_account=" + account_name
    r5 = requests.get(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key})
    payload = r5.json()
    if "ErrorCode" in payload:
        print(payload)
        return None
    if "Acls" not in payload:
        return None
    return [acl["Id"] for acl in payload["Acls"]]
def add_group_permissions_for_account(env1, admin_api_key1, env2, admin_api_key2, group1_id, group2_id, account_name):
    """Copy one account's ACLs from group1 (source env) to group2 (target env).

    BUG FIXES: (1) failure messages previously printed the literal text
    "group2_id" instead of the target group's id; (2) the no-ACLs branch
    printed r6.json() before r6 was ever assigned, raising NameError.
    """
    list_of_acls = get_list_of_group_acls_for_account(env1, admin_api_key1, group1_id, account_name)
    if list_of_acls is None:
        print("Could NOT add group permissions from " + group1_id + " to " + group2_id + " for the account " + account_name)
        return
    api_url = "https://" + env2 + ".cloudcheckr.com/api/account.json/add_access_control_lists_per_account_per_group"
    r6 = requests.post(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key2}, data = json.dumps({"group_id": group2_id, "use_account": account_name, "acls": list_of_acls}))
    # print(r6.json())
    if ("ErrorCode" in r6.json()) or ("Id" not in r6.json()):
        print("Could NOT add group permissions from " + group1_id + " to " + group2_id + " for the account " + account_name)
        print(r6.json())
        return
    print("Added Group Permissions from " + group1_id + " to " + r6.json()["Id"] + " for the account " + account_name)
    print(r6.json())
def add_group_permissions(env1, admin_api_key1, env2, admin_api_key2, group1_id, group2_id):
    """
    For every account in a group add the associated permissions for the account.
    """
    accounts = get_accounts_in_group(env1, admin_api_key1, group1_id)
    if accounts is None:
        return
    for account in accounts:
        if account is not None:
            add_group_permissions_for_account(env1, admin_api_key1, env2,
                                              admin_api_key2, group1_id,
                                              group2_id, account)
def add_groups_permissions(env1, admin_api_key1, env2, admin_api_key2, group_links):
    """
    For every group add the permissions for all of the associated accounts.
    """
    for link in group_links:
        # link is a [old_id, new_id, name] row
        add_group_permissions(env1, admin_api_key1, env2, admin_api_key2,
                              link[0], link[1])
    print("Finished adding All Groups Permissions\n")
def environment_validator(env):
    """
    Checks if the passed in environment is a valid one, like qa or eu.

    BUG FIX: previously an unrecognized non-None env fell off the end of
    the function and returned None; now every path returns an explicit
    bool.
    """
    if env is None:
        print("environment is blank")
        return False  # can't use app because when using the api, you should be using api.cloudcheckr.com
    return env in ("qa", "api", "eu", "au")
def environment_checkr(env1, env2):
    """
    Checks whether or not you have to add +migration to a username if the
    environments are the same.

    Returns -1 if the environments are invalid, 0 if you have to add the
    +migration suffix (same environment), 1 if migrating between distinct
    environments.
    """
    if not (environment_validator(env1) and environment_validator(env2)):
        return -1
    # same environment -> 0 (must add the +migration to user)
    return 1 if env1 != env2 else 0
def get_users_in_group(env, admin_api_key, group_link_row):
    """Return the member emails of the group in group_link_row, or None.

    BUG FIX: the old np.full buffer used a fixed-width string dtype from a
    placeholder template, truncating emails longer than the template;
    dtype=object keeps them intact.
    """
    api_url = "https://" + env + ".cloudcheckr.com/api/account.json/get_users_by_group?group_id=" + group_link_row[0]
    r1 = requests.get(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key})
    if ("ErrorCode" in r1.json()):
        print("No users in the group " + group_link_row[2])
        return None
    Users = r1.json()["Users"]
    users_list = np.array([user["Email"] for user in Users], dtype=object)
    if np.size(users_list) == 0:
        return None
    return users_list
def build_group_objects(env1, admin_api_key1, group_links):
    """
    Builds a list of Group objects that will be used to more quickly check
    for users in groups.

    Each Group is created from a [old_id, new_id, name] link row and
    populated with the source environment's member emails. Returns None
    when group_links is None.
    """
    Groups_List = [] # initialize a python list of objects
    if not (group_links is None):
        for i in np.arange(0, np.shape(group_links)[0]):
            g1 = Group(group_links[i][0],group_links[i][1], group_links[i][2])
            g1.add_users(get_users_in_group(env1, admin_api_key1, group_links[i]))
            Groups_List.append(g1)
        return Groups_List
    else:
        return None
def build_groups(env1, admin_api_key1, env2, admin_api_key2):
    """
    Builds the groups in the new environment and copies over the permissions.

    NOTE(review): returns a (group_links, Groups_List) tuple on success but
    a bare None when the source has no groups — callers that unpack two
    values will raise TypeError in that case; confirm callers handle it.
    """
    groups = get_groups(env1, admin_api_key1)
    print("Grabbed Original Group List")
    if (groups is None): # this script should work even if there are no groups
        print("No Groups in original environment")
        return None
    else:
        group_links = create_groups(env1, admin_api_key1, env2, admin_api_key2, groups)
        print("Start adding group permissions")
        add_groups_permissions(env1, admin_api_key1, env2, admin_api_key2, group_links)
        Groups_List = build_group_objects(env1, admin_api_key1, group_links)
        return group_links, Groups_List
def get_users(env, admin_api_key):
    """Return the environment's user permission records, or None on error."""
    api_url = "https://" + env + ".cloudcheckr.com/api/account.json/get_users_v2"
    response = requests.get(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key})
    payload = response.json()
    if "ErrorCode" in payload:
        return None
    return payload["user_permissions"]
def get_group_name_of_user(Groups_List, user_email):
    """Return the name of the first group containing *user_email*, else None.

    BUG FIX: the old emptiness guard `np.size(Groups_List) is None` could
    never be true (np.size returns an int); replaced with a length check.
    """
    if Groups_List is None:
        return None
    if len(Groups_List) == 0:
        return None
    for group in Groups_List:
        if group.check_user(user_email):
            return group.name
    return None # user isn't in a group
def get_corrected_email(email, env_type):
    """
    If you are transfering users from qa to qa, you have to add +migration
    to make the users unique. (cheating a bit)
    """
    if env_type:
        # cross-environment migration: email stays as-is
        return email
    # I should add functionality to handle an initial email with a +qa part
    parts = email.split("@")
    local_part, domain_part = parts[0], parts[1]
    return local_part + "+migration" + "@" + domain_part
def add_user(env, admin_api_key, env_type, email, role, auth, group_name):
    """
    Adds a user to the new environment. Will only add to a group if the user
    is in a group. Can't use add_users because users in the same group could
    have different roles.

    Returns the (possibly +migration-corrected) email that was submitted.
    Success is signalled by a CreationStatuses payload; a Message payload
    indicates failure.
    """
    api_url = "https://" + env + ".cloudcheckr.com/api/account.json/add_user"
    r7 = None
    # same-environment migrations get a +migration suffix for uniqueness
    email = get_corrected_email(email, env_type)
    if (group_name is None):
        r7 = requests.post(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key}, data = json.dumps({"email": email, "user_role": role, "auth_types": auth}))
    else:
        r7 = requests.post(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key}, data = json.dumps({"email": email, "user_role": role, "auth_types": auth, "group": group_name}))
    if ("Message" in r7.json()):
        print("Failed to create new user " + email)
        print(r7.json())
    else:
        if ("CreationStatuses" in r7.json()):
            if (group_name is None):
                print("Created the user " + email)
                print(r7.json())
            else:
                print("Created the user " + email + " in the group " + group_name)
                print(r7.json())
        else:
            print("Failed to create user " + email)
            print(r7.json())
    return email
def add_users(env1, admin_api_key1, env2, admin_api_key2, env_type, users, Groups_List):
"""
Adds all the users to the new account. Will add to a group if the user is currently in a group.
"""
if (users is None):
print("No users in the account")
return None
if (np.shape(users)[0] == 0):
print("No users in the account")
return None
| |
version 1.
#
old_version = self.string
base, _base_= [], old_version.split(".")
increase = True
for i in _base_:
base.append(int(i))
count = len(base)-1
for i in range(len(base)):
if increase:
if base[count] >= 9:
if count > 0:
base[count-1] += 1
base[count] = 0
increase = False
else:
base[count] += 1
break
else:
base[count] += 1
break
else:
if count > 0 and int(base[count]) >= 10:
base[count-1] += 1
base[count] = 0
increase = False
elif count == 0: break
count -= 1
version = ""
for i in base:
if version == "": version = str(i)
else: version += "."+str(i)
return version
# slice dict from string.
# get the first {} from the string by depth.
def slice_dict(self, depth=1):
    """Return the first {...} span at the given nesting depth (delegates to self.between)."""
    return self.between(["{", "}"], depth=depth)
# slice array from string.
# get the first [] from the string by depth.
def slice_array(self, depth=1):
    """Return the first [...] span at the given nesting depth (delegates to self.between)."""
    return self.between(["[", "]"], depth=depth)
# slice tuple from string.
# get the first () from the string by depth.
def slice_tuple(self, depth=1):
    """Return the first (...) span at the given nesting depth (delegates to self.between)."""
    return self.between(["(", ")"], depth=depth)
# iterate chars.
# > for charcount, char in String.iterate_chars()
def iterate_chars(self):
    """Return [[index, char], ...] for every character in the string."""
    return [[position, char] for position, char in enumerate(self.string)]
def iterate_characters(self):
    """Alias for iterate_chars()."""
    return self.iterate_chars()
# iterate lines.
# > for linecount, line in String.iterate_lines()
def iterate_lines(self):
    """Return [[index, line], ...] for every newline-separated line."""
    return [[position, line]
            for position, line in enumerate(self.string.split("\n"))]
# build an indentation string of *indent* spaces.
def indent(self, indent=4):
    """Return a string of *indent* space characters."""
    return " " * indent
def line_indent(self, line=""):
    """Count the leading space characters of *line*; returns a Formats.Integer."""
    # get line indent.
    # NOTE(review): this replace looks like a tab -> spaces expansion whose
    # tab characters were lost in formatting; as rendered it is a no-op —
    # confirm against version control.
    line = line.replace(" ", " ")
    if len(line) > 0 and " " in line:
        line_indent = 0
        for c in line:
            if c in [" "]: line_indent += 1
            else: break
    else: line_indent = 0
    return Formats.Integer(line_indent)
def slice_indent(self, indent=4, depth=1, string=None, remove_indent=True):
    """Return the lines of the depth-th block indented at exactly *indent*.

    Scans line by line; a block opens at the first line whose indent equals
    *indent* (after *depth* shallower lines have been seen) and extends
    through deeper-indented lines; it closes at the next line at or below
    *indent*. When remove_indent is True the leading *indent* characters
    are stripped from each collected line.
    """
    if string == None: string = self.string
    # NOTE(review): this replace looks like a tab -> spaces expansion whose
    # tab characters were lost in formatting; as rendered it is a no-op —
    # confirm against version control.
    string = string.replace(" ", " ")
    # s: collected text, opened: inside the target block, d: blocks passed
    s, open, opened, d = "", 0, False, 0
    for line in string.split("\n"):
        # get line indent.
        if len(line) > 0 and " " in line:
            line_indent = 0
            for c in line:
                if c in [" "]: line_indent += 1
                else: break
        else: line_indent = 0
        # check indent match.
        if (not opened and line_indent == indent) or (opened and line_indent >= indent):
            if d >= depth:
                if remove_indent:
                    s += line[indent:]+"\n"
                else:
                    s += line+"\n"
                opened = True
        #elif len(line) > 0 and not opened and line_indent == indent:
        #	d += 1
        elif len(line) > 0 and line_indent <= indent:
            # shallower line: either terminates the open block or counts
            # toward the depth of blocks to skip
            if opened:
                break
            else:
                d += 1
    return s
# get the first / last n characters of the string.
def first(self, count):
    """Return the first *count* characters; a non-numeric *count* contributes its len()."""
    if isinstance(count, (int, float, Integer)):
        size = int(count)
    else:
        size = len(count)
    return self.string[:size]
def last(self, count):
    """Return the last *count* characters, or None when the string is shorter.

    BUG FIX: previously returned self.string[count:] — everything AFTER the
    first *count* characters — which only equals the last *count* characters
    when len(string) == 2*count. Now slices from the end.
    """
    if isinstance(count, (int, float, Integer)):
        count = int(count)
    else:
        count = len(count)
    if len(self.string) >= count:
        return self.string[-count:]
    else:
        return None
#
# remove first / last n characters of the string.
def remove_first(self, count):
    """Drop the first *count* characters in place and return the new string.

    (Dead local `removed` from the original implementation dropped.)
    """
    if isinstance(count, (int, float, Integer)):
        count = int(count)
    else:
        count = len(count)
    self.string = self.string[count:]
    return self.string
def remove_last(self, count):
    """Drop the last *count* characters in place and return the new string.

    BUG FIX: when count == 0, self.string[:-0] evaluates to "" and wiped the
    whole string; a guard makes zero a no-op. (Dead local `removed` dropped.)
    """
    if isinstance(count, (int, float, Integer)):
        count = int(count)
    else:
        count = len(count)
    if count:
        self.string = self.string[:-count]
    return self.string
#
# support default str functions.
def split(self, string):
    """Split the wrapped string.

    When *string* is a list/Array of delimiters, scans character by
    character and cuts whenever the buffer's tail matches any delimiter,
    returning a plain list (empty segments between adjacent delimiters are
    dropped only for the trailing buffer). Otherwise delegates to
    str.split and wraps the result in Files.Array.
    """
    if isinstance(string, (list, Array)):
        if isinstance(string, Array): array = string.array
        else: array = string
        new, last, next_start = [], "", None
        for i in self.string:
            last += i
            newslice = False
            #l_next_start = None
            for test in array:
                if test in last:
                    # cut when the buffer's tail equals a delimiter
                    if str(last[-len(test):]) == str(test):
                        #l_next_start = last[:-len(test)]
                        last = last[:-len(test)]
                        newslice = True
                        break
            if newslice:
                new.append(last)
                last = ""
            #if next_start == None: new.append(last)
            #elif include:
            #	new.append(next_start+last)
            #	next_start = None
            #if include and l_next_start != None:
            #	next_start = l_next_start
        # flush the trailing segment, if any
        if last != "":
            new.append(last)
        return new
    else:
        return Files.Array(self.string.split(str(string)))
def count(self, string):
    """Return the number of occurrences of *string* as a Formats.Integer."""
    return Formats.Integer(self.string.count(str(string)))
def replace(self, from_, to_):
    """Return a plain str with *from_* replaced by *to_* (self is not mutated)."""
    return self.string.replace(str(from_), str(to_))
def lower(self, string=None):
    """Return a lowercased copy of the wrapped string.

    BUG FIX: str.lower() takes no arguments, so the old body
    `self.string.lower(str(string))` raised TypeError on every call. The
    parameter is kept (and ignored) for backward compatibility.
    """
    return self.string.lower()
def upper(self, string=None):
    """Return an uppercased copy of the wrapped string.

    BUG FIX: str.upper() takes no arguments, so the old body
    `self.string.upper(str(string))` raised TypeError on every call. The
    parameter is kept (and ignored) for backward compatibility.
    """
    return self.string.upper()
# support "+" & "-" .
def __add__(self, string):
    """Concatenation: self + other, where other is a str or another String."""
    if isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, str):
        raise Exceptions.FormatError(f"Can not add object {self.__class__} & {string.__class__}.")
    return self.string + string
def __iadd__(self, string):
    """In-place concatenation (self += other); accepts str or String, returns self."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not add object {self.__class__} & {string.__class__}.")
    self.string = self.string + string
    return self
def __sub__(self, string):
    """Subtraction: return a plain str with all occurrences of other removed.

    NOTE(review): the error message says "add" — likely copy/paste from
    __add__; confirm before changing the user-facing text.
    """
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not add object {self.__class__} & {string.__class__}.")
    return self.string.replace(string, "")
def __isub__(self, string):
    """In-place subtraction (self -= other): strips all occurrences, returns self.

    NOTE(review): the error message says "add" — likely copy/paste from
    __add__; confirm before changing the user-facing text.
    """
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not add object {self.__class__} & {string.__class__}.")
    self.string = self.string.replace(string, "")
    return self
# support subscriptionable.
def __getitem__(self, index):
    """Index/slice into the wrapped string (index normalized via Formats.denitialize)."""
    return self.string[Formats.denitialize(index)]
def __setitem__(self, index, value):
    """Replace the character at *index* in place.

    BUG FIX: str does not support item assignment, so the old body
    `self.string[index] = str(value)` raised TypeError on every call.
    Rebuild the string through a mutable list instead (list assignment
    also handles negative indices and slices naturally).
    """
    chars = list(self.string)
    chars[Formats.denitialize(index)] = str(value)
    self.string = "".join(chars)
# support default iteration.
def __iter__(self):
    """Iterate over the characters of the wrapped string."""
    return iter(self.string)
# support '>=' & '>' operator.
def __gt__(self, string):
    """Compare by LENGTH, not lexicographically: len(self) > len(other)."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
    return len(self.string) > len(string)
def __ge__(self, string):
    """Compare by LENGTH, not lexicographically: len(self) >= len(other)."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
    return len(self.string) >= len(string)
# support '<=' & '<' operator.
def __lt__(self, string):
    """Compare by LENGTH, not lexicographically: len(self) < len(other)."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
    return len(self.string) < len(string)
def __le__(self, string):
    """Compare by LENGTH, not lexicographically: len(self) <= len(other)."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
    return len(self.string) <= len(string)
# support '==' & '!=' operator.
def __eq__(self, string):
    """Equality against a str or another String; any other type is unequal (no exception)."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        return False
    return self.string == string
def __ne__(self, string):
    """Inequality against a str or another String; any other type compares unequal."""
    if isinstance(string, str):
        a=1
    elif isinstance(string, self.__class__):
        string = string.string
    elif not isinstance(string, self.__class__):
        return True
    return self.string != string
# support +.
def __concat__(self, string):
    """Concatenate with a str or another String.

    BUG FIX: the final guard tested an undefined name `value`, raising
    NameError instead of FormatError for unsupported operand types.
    """
    if isinstance(string, str):
        pass
    elif isinstance(string, self.__class__):
        string = string.string
    else:
        raise Exceptions.FormatError(f"Can not concat object {self.__class__} & {string.__class__}.")
    return self.string + string
# support 'in' operator.
def __contains__(self, string):
	"""
	Substring test against the wrapped string.
	A list / Files.Array argument is True when ANY element is a substring.
	"""
	haystack = str(self.string)
	if isinstance(string, (list, Files.Array)):
		return any(str(item) in haystack for item in string)
	return str(string) in haystack
#
# representation.
def __repr__(self):
	"""Developer representation; identical to str(self)."""
	return str(self)
# str representation.
def __str__(self):
	"""Return the wrapped value as a plain str."""
	return str(self.string)
# int representation.
def __int__(self):
	"""int(self): parse the wrapped string as an integer (ValueError when not numeric)."""
	return int(self.string)
# float representation.
def __float__(self):
	"""float(self): parse the wrapped string as a float (ValueError when not numeric)."""
	return float(self.string)
# bool representation.
def __bool__(self):
	"""Truthiness follows the wrapped string: non-empty -> True, empty -> False."""
	return len(self.string) > 0
# content count.
def __len__(self):
	"""len(self): length of the wrapped string."""
	return len(self.string)
# object id.
def __id__(self):
	"""Readable identity tag, e.g. "(String:value)" (project helper, not a python dunder)."""
	return f"({self.instance()}:{str(self)})"
# # object instance.
def instance(self):
	"""Return this wrapper's type name."""
	return "String"
#
@property
def __name__(self):
	"""Expose the wrapper's type name ("String") as __name__."""
	return self.instance()
# support self assignment.
def assign(self, string):
	"""
	Assign a new value in place (stored via str()) and return self.
	Bug fix: a plain `str` argument matched none of the accept-branches and
	fell through to the type-error branch, raising FormatError for the most
	common input type. str is now accepted alongside int/float.
	"""
	if isinstance(string, (str, int, float)):
		pass  # natively convertible; stored via str() below.
	elif isinstance(string, self.__class__):
		string = string.string  # unwrap a wrapped String.
	else:
		raise Exceptions.FormatError(f"Can not assign object {self.__class__} & {string.__class__}.")
	self.string = str(string)
	return self
# return raw data.
def raw(self):
	"""
	Return the raw wrapped value.
	Bug fix: returned `self.str`, an attribute that is never set anywhere in
	this class (every sibling method reads/writes `self.string`), so this
	always raised AttributeError.
	"""
	return self.string
#
# the boolean object class.
class Boolean(object):
	"""
	Bool wrapper with optional file persistence (mirrors Formats.String/Integer).

	Bug fixes vs. the previous revision:
	  * __init__ called save(array=default) — save() has no `array` parameter
	    (TypeError) — and dereferenced self.file_path while it could be None.
	  * save() had its None-check inverted (`!=`), discarding passed values.
	  * assign() tested the undefined name `value` (NameError).
	  * defining __eq__ made instances unhashable; __hash__ is restored.
	"""
	def __init__(self,
		# the boolean's value (bool) (#1).
		boolean=False,
		# the path (str, FilePath) (param #2).
		path=False,
		# load the data on initialization.
		load=False,
		# the default array (will be created if file path does not exist).
		default=None,
	):
		# docs.
		DOCS = {
			"module":"Boolean",
			"initialized":False,
			"description":[],
			"chapter": "Defaults", }
		# unwrap a wrapped Boolean.
		if isinstance(boolean, Formats.Boolean):
			boolean = boolean.bool
		# normalize truthy spellings ("true"/"True"/"TRUE"/True) to a real bool.
		self.bool = boolean in ["true", "True", "TRUE", True]
		# path.
		if path == False: self.file_path = self.fp = None # used in local memory (not fysical)
		else: self.file_path = self.fp = Formats.FilePath(path)
		# only touch the filesystem when a path was actually given (fix: guard
		# None file_path; call save() with its real keyword, not `array=`).
		if self.file_path != None and default != None and not Files.exists(self.file_path.path): self.save(bool=default)
		if load: self.load()
	#
	def save(self, bool=None, path=None, sudo=False):
		"""
		Persist the boolean to file (written as "True"/"False").
		Fix: the None-check was inverted, so a passed value was always replaced
		by self.bool (and a missing value saved the literal string "None").
		"""
		if bool == None: bool = self.bool
		if path == None: path = self.file_path.path
		utils.__check_memory_only__(path)
		self.bool = bool
		return Files.save(path, str(bool), format="str", sudo=sudo)
	def load(self, default=None, sudo=False):
		"""Read the value back from file; create it from `default` when missing."""
		utils.__check_memory_only__(self.file_path.path)
		if not os.path.exists(self.file_path.path) and default != None:
			self.save(default, sudo=sudo)
		# NOTE(review): Files.load returns the raw file string ("True"/"False"),
		# not a bool; kept as-is to preserve behaviour — confirm callers expect this.
		self.bool = Files.load(self.file_path.path, format="str", sudo=sudo)
		return self.bool
	def string(self, true="True", false="False"):
		"""Map the boolean onto custom string representations."""
		if self.bool:
			return true
		else:
			return false
	# native support.
	def __index__(self):
		return int(self)
	# support '==' & '!=' operator.
	def __eq__(self, boolean):
		if isinstance(boolean, bool):
			return self.bool == boolean
		elif not isinstance(boolean, self.__class__):
			return False
		return self.bool == boolean.bool
	def __ne__(self, boolean):
		if isinstance(boolean, bool):
			return self.bool != boolean
		elif not isinstance(boolean, self.__class__):
			return True
		return self.bool != boolean.bool
	# defining __eq__ disables inherited hashing; restore it explicitly.
	def __hash__(self):
		return hash(self.bool)
	# support default iteration.
	def __iter__(self):
		return iter(str(self.bool))
	# support 'in' operator.
	def __contains__(self, string):
		return string in str(self.bool)
	#
	# representation.
	def __repr__(self):
		return str(self)
	#
	# str representation.
	def __str__(self):
		return str(self.bool)
	# int representation.
	def __int__(self):
		return 1 if self.bool else 0
	# float representation.
	def __float__(self):
		return 1.0 if self.bool else 0.0
	# bool representation.
	def __bool__(self):
		return self.bool
	# object id.
	def __id__(self):
		return f"({self.instance()}:{str(self)})"
	# object instance.
	def instance(self):
		return "Boolean"
	#
	@property
	def __name__(self):
		return self.instance()
	# support self assignment.
	def assign(self, boolean):
		"""
		Assign a new value in place and return self.
		Fix: the same-class branch tested the undefined name `value` (NameError).
		"""
		if isinstance(boolean, (int, float)):
			pass  # bool is an int subclass, so True/False land here too.
		elif isinstance(boolean, self.__class__):
			boolean = boolean.bool  # unwrap a wrapped Boolean.
		else:
			raise Exceptions.FormatError(f"Can not assign object {self.__class__} & {boolean.__class__}.")
		self.bool = boolean
		return self
	# return raw data.
	def raw(self):
		return self.bool
#
# the integer object class.
class Integer(object):
def __init__(self,
	# the integers value (int, float) (param #1).
	value=0,
	# the path (str, FilePath) (param #2).
	path=False,
	# the integer format (str) (param #3).
	format="auto",
	# load the data on initialization.
	load=False,
	# the default array (will be created if file path does not exist).
	default=None,
):
	"""
	Number wrapper: stores the value as int or float (detected from its
	textual form) with optional file persistence.
	Bug fix: the default-file branch called save(array=default) — save() has
	no `array` parameter (TypeError) — and dereferenced self.file_path even
	when no path was given (None).
	"""
	# docs.
	DOCS = {
		"module":"Integer",
		"initialized":False,
		"description":[],
		"chapter": "Defaults", }
	# unwrap a wrapped Integer.
	if isinstance(value, Formats.Integer):
		if "." in str(value):
			value = value.float
		else:
			value = value.int
	# detect float vs int from the textual form ("." present -> float).
	if "." in str(value):
		self.format = "float"
		self.value = float(value)
	else:
		self.format = "int"
		self.value = int(value)
	self.int = int(value)
	self.float = float(value)
	# path.
	if path == False: self.file_path = self.fp = None # used in local memory (not fysical)
	else: self.file_path = self.fp = Formats.FilePath(path)
	# only touch the filesystem when a path was actually given.
	if self.file_path != None and default != None and not Files.exists(self.file_path.path): self.save(data=default)
	if load: self.load()
#
def save(self, data=None, path=None, sudo=False):
	"""
	Persist the value to file (as a string); defaults to the stored value/path.
	Bug fix: the None-check was inverted (`!=` instead of `==`), so any
	explicitly passed `data` was immediately overwritten with self.raw() and a
	missing value was saved as the literal string "None".
	"""
	if data == None: data = self.raw()
	if path == None: path = self.file_path.path
	utils.__check_memory_only__(path)
	# keep the in-memory value in sync with what is written to disk.
	if data != self.raw():
		self.assign(data)
	return Files.save(path, str(data), format="str", sudo=sudo)
def load(self, default=None, sudo=False):
	"""
	Load the value from file and assign it onto self.
	When the file does not exist and `default` is given, it is written first.
	Returns the raw loaded string (not the converted number) — callers that
	need a number should use the object's attributes after the call.
	"""
	utils.__check_memory_only__(self.file_path.path)
	if not os.path.exists(self.file_path.path) and default != None:
		self.save(default, sudo=sudo)
	data = Files.load(self.file_path.path, format="str", sudo=sudo)
	# assign() presumably converts the string back to int/float — defined
	# below this view; TODO confirm.
	self.assign(data)
	return data
def increase_version(self):
# version 1.
#
old_version = self.value
base, _base_= [], old_version.split(".")
increase = True
for i in _base_:
base.append(int(i))
count = len(base)-1
for i in range(len(base)):
if increase:
if base[count] >= 9:
if count > 0:
base[count-1] += 1
base[count] | |
self.Tabdata.Set_edition(4)
self.assertEqual(self.Tabdata.edition(), 4)
def testSet_edition(self):
    """Round-trip: Set_edition() stores the value edition() reads back."""
    # check we can put a value in and read it out
    self.Tabdata.Set_edition(3)
    self.assertEqual(self.Tabdata.edition(), 3)
def testcompressflag(self):
# check we can put a value in and read it out
self.Tabdata.Set_compressflag(1)
self.assertEqual(self.Tabdata.compressflag(), 1)
def testSet_compressflag(self):
# check we can put a value in and read it out
self.Tabdata.Set_compressflag(0)
self.assertEqual(self.Tabdata.compressflag(), 0)
def testcrcflag(self):
# check we can put a value in and read it out
self.Tabdata.Set_crcflag(1)
self.assertEqual(self.Tabdata.crcflag(), 1)
def testSet_crcflag(self):
# check we can put a value in and read it out
self.Tabdata.Set_crcflag(0)
self.assertEqual(self.Tabdata.crcflag(), 0)
def testambleflag(self):
# check we can put a value in and read it out
self.Tabdata.Set_ambleflag(1)
self.assertEqual(self.Tabdata.ambleflag(), 1)
def testSet_ambleflag(self):
# check we can put a value in and read it out
self.Tabdata.Set_ambleflag(0)
self.assertEqual(self.Tabdata.ambleflag(), 0)
def testsegment(self):
# check we can put a value in and read it out
self.Tabdata.Set_segment(5)
self.assertEqual(self.Tabdata.segment(), 5)
def testSet_segment(self):
# check we can put a value in and read it out
self.Tabdata.Set_segment(1)
self.assertEqual(self.Tabdata.segment(), 1)
def testsa(self):
# check we can put a value in and read it out
self.Tabdata.Set_sa(10)
self.assertEqual(self.Tabdata.sa(), 10)
def testSet_sa(self):
# check we can put a value in and read it out
self.Tabdata.Set_sa(9)
self.assertEqual(self.Tabdata.sa(), 9)
def testdfa(self):
# check we can put a value in and read it out
self.Tabdata.Set_dfa(20)
self.assertEqual(self.Tabdata.dfa(), 20)
def testSet_dfa(self):
# check we can put a value in and read it out
self.Tabdata.Set_dfa(10)
self.assertEqual(self.Tabdata.dfa(), 10)
def testdatasize(self):
# check we can put a value in and read it out
self.Tabdata.Set_datasize(300)
self.assertEqual(self.Tabdata.datasize(), 300)
def testSet_datasize(self):
# check we can put a value in and read it out
self.Tabdata.Set_datasize(200)
self.assertEqual(self.Tabdata.datasize(), 200)
def testnocrcdatasize(self):
    """nocrcdatasize() subtracts the 2-byte CRC from datasize only when crcflag is set."""
    # check two cases - one with the crc flag on (1) and one off (0)
    self.Tabdata.Set_datasize(200)
    self.Tabdata.Set_crcflag(1)
    self.assertEqual(self.Tabdata.nocrcdatasize(), 198)
    self.Tabdata.Set_crcflag(0)
    self.assertEqual(self.Tabdata.nocrcdatasize(), 200)
def testdfn(self):
# check we can put a value in and read it out
self.Tabdata.Set_dfn(9)
self.assertEqual(self.Tabdata.dfn(), 9)
def testSet_dfn(self):
# check we can put a value in and read it out
self.Tabdata.Set_dfn(22)
self.assertEqual(self.Tabdata.dfn(), 22)
def testtimetag(self):
# check we can put a value in and read it out
self.Tabdata.Set_timetag(1000)
self.assertEqual(self.Tabdata.timetag(), 1000)
def testSet_timetag(self):
# check we can put a value in and read it out
self.Tabdata.Set_timetag(1200)
self.assertEqual(self.Tabdata.timetag(), 1200)
def testsynctype(self):
# check we can put a value in and read it out
self.Tabdata.Set_synctype("None")
self.assertEqual(self.Tabdata.synctype(), "None")
def testSet_synctype(self):
# check we can put a value in and read it out
self.Tabdata.Set_synctype("SUPER FRAME SYNC")
self.assertEqual(self.Tabdata.synctype(), "SUPER FRAME SYNC")
def testreserved(self):
# check we can put a value in and read it out
self.Tabdata.Set_reserved("0000000000")
self.assertEqual(self.Tabdata.reserved(), "0000000000")
def testSet_reserved(self):
# check we can put a value in and read it out
self.Tabdata.Set_reserved("0100000000")
self.assertEqual(self.Tabdata.reserved(), "0100000000")
def testheadcrc(self):
# check we can put a value in and read it out
self.Tabdata.Set_headcrc("FFAD")
self.assertEqual(self.Tabdata.headcrc(), "FFAD")
def testSet_headcrc(self):
# check we can put a value in and read it out
self.Tabdata.Set_headcrc("FFAC")
self.assertEqual(self.Tabdata.headcrc(), "FFAC")
def testRequester_Idx_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Requester_Idx_Num(1)
self.assertEqual(self.Tabdata.Requester_Idx_Num(), 1)
def testSet_Requester_Idx_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Requester_Idx_Num(2)
self.assertEqual(self.Tabdata.Requester_Idx_Num(), 2)
def testGroup_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Group_ID_Num(1)
self.assertEqual(self.Tabdata.Group_ID_Num(), 1)
def testSet_Group_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Group_ID_Num(3)
self.assertEqual(self.Tabdata.Group_ID_Num(), 3)
def testEvent_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Event_ID_Num(1)
self.assertEqual(self.Tabdata.Event_ID_Num(), 1)
def testSet_Event_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Event_ID_Num(100)
self.assertEqual(self.Tabdata.Event_ID_Num(), 100)
def testSegment_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Segment_ID_Num(1)
self.assertEqual(self.Tabdata.Segment_ID_Num(), 1)
def testSet_Segment_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Segment_ID_Num(4)
self.assertEqual(self.Tabdata.Segment_ID_Num(), 4)
def testLocation_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Location_ID_Num(1)
self.assertEqual(self.Tabdata.Location_ID_Num(), 1)
def testSet_Location_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Location_ID_Num(31)
self.assertEqual(self.Tabdata.Location_ID_Num(), 31)
def testTarget_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Target_ID_Num(1)
self.assertEqual(self.Tabdata.Target_ID_Num(), 1)
def testSet_Target_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Target_ID_Num(11)
self.assertEqual(self.Tabdata.Target_ID_Num(), 11)
def testGimbal_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Gimbal_ID_Num(15)
self.assertEqual(self.Tabdata.Gimbal_ID_Num(), 15)
def testSet_Gimbal_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Gimbal_ID_Num(5)
self.assertEqual(self.Tabdata.Gimbal_ID_Num(), 5)
def testSensor_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Sensor_ID_Num(5)
self.assertEqual(self.Tabdata.Sensor_ID_Num(), 5)
def testSet_Sensor_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Sensor_ID_Num(60)
self.assertEqual(self.Tabdata.Sensor_ID_Num(), 60)
def testPlatform_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Platform_ID_Num(0)
self.assertEqual(self.Tabdata.Platform_ID_Num(), 0)
def testSet_Platform_ID_Num(self):
# check we can put a value in and read it out
self.Tabdata.Set_Platform_ID_Num(2)
self.assertEqual(self.Tabdata.Platform_ID_Num(), 2)
def testtablecode(self):
# check we can put a value in and read it out
self.Tabdata.Set_tablecode(1)
self.assertEqual(self.Tabdata.tablecode(), 1)
def testsourcecode(self):
# check we can put a value in and read it out
self.Tabdata.Set_sourcecode(1)
self.assertEqual(self.Tabdata.sourcecode(), 1)
def testSet_tablecode(self):
# check we can put a value in and read it out
self.Tabdata.Set_tablecode(10)
self.assertEqual(self.Tabdata.tablecode(), 10)
def testSet_sourcecode(self):
# check we can put a value in and read it out
self.Tabdata.Set_sourcecode(10)
self.assertEqual(self.Tabdata.sourcecode(), 10)
def testtotlen(self):
# check we can put a value in and read it out
self.Tabdata.Set_totlen(2000)
self.assertEqual(self.Tabdata.totlen(), 2000)
def testSet_totlen(self):
# check we can put a value in and read it out
self.Tabdata.Set_totlen(5000)
self.assertEqual(self.Tabdata.totlen(), 5000)
def testclaimlen(self):
# check we can put a value in and read it out
self.Tabdata.Set_claimlen(2000)
self.assertEqual(self.Tabdata.claimlen(), 2000)
def testSet_claimlen(self):
# check we can put a value in and read it out
self.Tabdata.Set_claimlen(3000)
self.assertEqual(self.Tabdata.claimlen(), 3000)
def testextraraw(self):
# check we can put a value in and read it out
self.Tabdata.Set_extraraw(b"143464576543435325ABCDEF")
self.assertEqual(self.Tabdata.extraraw(), b"143464576543435325ABCDEF")
def testSet_extraraw(self):
# check we can put a value in and read it out
self.Tabdata.Set_extraraw(b"143464576543435325ABCDEFAAA")
self.assertEqual(
self.Tabdata.extraraw(), b"143464576543435325ABCDEFAAA")
def testpacketnum(self):
# check we can put a value in and read it out
self.Tabdata.Set_packetnum(20)
self.assertEqual(self.Tabdata.packetnum(), 20)
def testSet_packetnum(self):
# check we can put a value in and read it out
self.Tabdata.Set_packetnum(35)
self.assertEqual(self.Tabdata.packetnum(), 35)
def testblockdataextract(self):
# check we can put a value in and read it out
self.Tabdata.Set_blockdataextract(True)
self.assertEqual(self.Tabdata.blockdataextract(), True)
def testSet_blockdataextract(self):
# check we can put a value in and read it out
self.Tabdata.Set_blockdataextract(False)
self.assertEqual(self.Tabdata.blockdataextract(), False)
def testherrors(self):
    """herrors() exposes the header's NPIF_Error accumulator with recorded entries intact."""
    # get the output and check basic value are in it ok
    self.Tabdata.hdr.errors.adderror(2,2,"test abc")
    a = self.Tabdata.herrors()
    self.assertTrue(isinstance(a,NPIF.NPIF_Error))
    self.assertEqual(a.einfo(0),(2,2,"test abc"))
def testtablename(self):
# check we can put a value in and read it out
self.Tabdata.Set_tablename("blah")
self.assertEqual(self.Tabdata.tablename(), "blah")
def testSet_tablename(self):
# check we can put a value in and read it out
self.Tabdata.Set_tablename("blah2")
self.assertEqual(self.Tabdata.tablename(), "blah2")
def testdataraw(self):
# check we can put a value in and read it out
self.Tabdata.Set_dataraw(b"143464576543435325ABCDEF")
self.assertEqual(self.Tabdata.dataraw(), b"143464576543435325ABCDEF")
def testSet_dataraw(self):
# check we can put a value in and read it out
self.Tabdata.Set_dataraw(b"\x12\x13\x14\x15\x16\x17\x1a\x1b\x20")
self.assertEqual(
self.Tabdata.dataraw(), b"\x12\x13\x14\x15\x16\x17\x1a\x1b\x20")
def testdatacrc(self):
self.Tabdata.Set_datacrc(b"123A")
self.assertEqual(self.Tabdata.datacrc(), b"123A")
def testSet_datacrc(self):
# check we can put a value in and read it out
self.Tabdata.Set_datacrc(b"126A")
self.assertEqual(self.Tabdata.datacrc(), b"126A")
def testnumfieldsrepeating(self):
# check we can put a value in and read it out
self.Tabdata.Set_numfieldsrepeating(3)
self.assertEqual(self.Tabdata.numfieldsrepeating(), 3)
def testSet_numfieldsrepeating(self):
# check we can put a value in and read it out
self.Tabdata.Set_numfieldsrepeating(9)
self.assertEqual(self.Tabdata.numfieldsrepeating(), 9)
def testnumrepeats(self):
# check we can put a value in and read it out
self.Tabdata.Set_numrepeats(4)
self.assertEqual(self.Tabdata.numrepeats(), 4)
def testSet_numrepeats(self):
# check we can put a value in and read it out
self.Tabdata.Set_numrepeats(8)
self.assertEqual(self.Tabdata.numrepeats(), 8)
def testfieldnames(self):
# check we can put a value in and read it out
indat = ('a', 'b', 'c')
self.Tabdata.Set_fieldnames(indat)
self.assertEqual(self.Tabdata.fieldnames(), indat)
def testSet_fieldnames(self):
# check we can put a value in and read it out
indat = ('a', 'b', 'c', 'blah')
self.Tabdata.Set_fieldnames(indat)
self.assertEqual(self.Tabdata.fieldnames(), indat)
def testfieldtypes(self):
# check we can put a value in and read it out
indat = ('h', 'i', 'e', 'x')
self.Tabdata.Set_fieldtypes(indat)
self.assertEqual(self.Tabdata.fieldtypes(), indat)
def testSet_fieldtypes(self):
# check we can put a value in and read it out
indat = ('h', 'i', 'e')
self.Tabdata.Set_fieldtypes(indat)
self.assertEqual(self.Tabdata.fieldtypes(), indat)
def testfieldfuncs(self):
# check we can put | |
from stonky.enums import CurrencyType
from stonky.stock import Stock
# Shared fixtures: pre-built Stock objects mirroring the mocked Yahoo
# quoteSummary responses defined further down in this module.
AMD_STOCK = Stock(
    ticket="AMD",
    currency=CurrencyType.USD,
    current_amount=452.49,
    delta_amount=14.540009,
    delta_percent=0.033234306,
    market_price=452.04,
    volume=41486205,
)
AAPL_STOCK = Stock(
    ticket="AAPL",
    currency=CurrencyType.USD,
    current_amount=81.83,
    delta_amount=-0.7700043,
    delta_percent=-0.009320957,
    market_price=81.84,
    volume=57092710,
)
# NOTE(review): VGRO.TO trades on the TSX in CAD — confirm USD here is
# intentional (it may simply mirror the mocked API payload).
VGRO_TO_STOCK = Stock(
    ticket="VGRO.TO",
    currency=CurrencyType.USD,
    current_amount=27.12,
    delta_amount=-0.12000084,
    delta_percent=-0.004402085,
    market_price=27.14,
    volume=126570,
)
SHOP_TO_STOCK = Stock(
    ticket="SHOP.TO",
    currency=CurrencyType.CAD,
    current_amount=1318.02,
    delta_amount=10.869995,
    delta_percent=0.008287331,
    market_price=1322.51,
    volume=127212,
)
AAPL_RESPONSE = {
"quoteSummary": {
"result": [
{
"summaryDetail": {
"maxAge": 1,
"priceHint": {"raw": 2, "fmt": "2", "longFmt": "2"},
"previousClose": {"raw": 437.5, "fmt": "437.50"},
"open": {"raw": 441.99, "fmt": "441.99"},
"dayLow": {"raw": 441.19, "fmt": "441.19"},
"dayHigh": {"raw": 453.1, "fmt": "453.10"},
"regularMarketPreviousClose": {
"raw": 437.5,
"fmt": "437.50",
},
"regularMarketOpen": {"raw": 441.99, "fmt": "441.99"},
"regularMarketDayLow": {"raw": 441.19, "fmt": "441.19"},
"regularMarketDayHigh": {"raw": 453.1, "fmt": "453.10"},
"dividendRate": {"raw": 3.28, "fmt": "3.28"},
"dividendYield": {"raw": 0.0075, "fmt": "0.75%"},
"exDividendDate": {"raw": 1596758400, "fmt": "2020-08-07"},
"payoutRatio": {"raw": 0.2373, "fmt": "23.73%"},
"fiveYearAvgDividendYield": {"raw": 1.56, "fmt": "1.56"},
"beta": {"raw": 1.226373, "fmt": "1.23"},
"trailingPE": {"raw": 34.284412, "fmt": "34.28"},
"forwardPE": {"raw": 29.088804, "fmt": "29.09"},
"volume": {
"raw": 41486205,
"fmt": "41.49M",
"longFmt": "41,486,205",
},
"regularMarketVolume": {
"raw": 41486205,
"fmt": "41.49M",
"longFmt": "41,486,205",
},
"averageVolume": {
"raw": 36773992,
"fmt": "36.77M",
"longFmt": "36,773,992",
},
"averageVolume10days": {
"raw": 50134385,
"fmt": "50.13M",
"longFmt": "50,134,385",
},
"averageDailyVolume10Day": {
"raw": 50134385,
"fmt": "50.13M",
"longFmt": "50,134,385",
},
"bid": {"raw": 452.49, "fmt": "452.49"},
"ask": {"raw": 452.67, "fmt": "452.67"},
"bidSize": {"raw": 2200, "fmt": "2.2k", "longFmt": "2,200"},
"askSize": {"raw": 1000, "fmt": "1k", "longFmt": "1,000"},
"marketCap": {
"raw": 1932755861504,
"fmt": "1.93T",
"longFmt": "1,932,755,861,504",
},
"yield": {},
"ytdReturn": {},
"totalAssets": {},
"expireDate": {},
"strikePrice": {},
"openInterest": {},
"fiftyTwoWeekLow": {"raw": 199.67, "fmt": "199.67"},
"fiftyTwoWeekHigh": {"raw": 457.65, "fmt": "457.65"},
"priceToSalesTrailing12Months": {
"raw": 7.0575366,
"fmt": "7.06",
},
"fiftyDayAverage": {"raw": 390.98856, "fmt": "390.99"},
"twoHundredDayAverage": {"raw": 321.10718, "fmt": "321.11"},
"trailingAnnualDividendRate": {"raw": 3.13, "fmt": "3.13"},
"trailingAnnualDividendYield": {
"raw": 0.007154286,
"fmt": "0.72%",
},
"navPrice": {},
"currency": "USD",
"fromCurrency": None,
"toCurrency": None,
"lastMarket": None,
"volume24Hr": {},
"volumeAllCurrencies": {},
"circulatingSupply": {},
"algorithm": None,
"maxSupply": {},
"startDate": {},
"tradeable": False,
},
"price": {
"maxAge": 1,
"preMarketChangePercent": {
"raw": 0.00973716,
"fmt": "0.97%",
},
"preMarketChange": {"raw": 4.26001, "fmt": "4.26"},
"preMarketTime": 1597238999,
"preMarketPrice": {"raw": 441.76, "fmt": "441.76"},
"preMarketSource": "FREE_REALTIME",
"postMarketChangePercent": {
"raw": 0.0009954466,
"fmt": "0.10%",
},
"postMarketChange": {"raw": 0.4499817, "fmt": "0.45"},
"postMarketTime": 1597276785,
"postMarketPrice": {"raw": 452.49, "fmt": "452.49"},
"postMarketSource": "DELAYED",
"regularMarketChangePercent": {
"raw": 0.033234306,
"fmt": "3.32%",
},
"regularMarketChange": {"raw": 14.540009, "fmt": "14.54"},
"regularMarketTime": 1597262401,
"priceHint": {"raw": 2, "fmt": "2", "longFmt": "2"},
"regularMarketPrice": {"raw": 452.04, "fmt": "452.04"},
"regularMarketDayHigh": {"raw": 453.1, "fmt": "453.10"},
"regularMarketDayLow": {"raw": 441.19, "fmt": "441.19"},
"regularMarketVolume": {
"raw": 41486205,
"fmt": "41.49M",
"longFmt": "41,486,205.00",
},
"averageDailyVolume10Day": {
"raw": 50134385,
"fmt": "50.13M",
"longFmt": "50,134,385",
},
"averageDailyVolume3Month": {
"raw": 36773992,
"fmt": "36.77M",
"longFmt": "36,773,992",
},
"regularMarketPreviousClose": {
"raw": 437.5,
"fmt": "437.50",
},
"regularMarketSource": "FREE_REALTIME",
"regularMarketOpen": {"raw": 441.99, "fmt": "441.99"},
"strikePrice": {},
"openInterest": {},
"exchange": "NMS",
"exchangeName": "NasdaqGS",
"exchangeDataDelayedBy": 0,
"marketState": "POSTPOST",
"quoteType": "EQUITY",
"symbol": "AAPL",
"underlyingSymbol": None,
"shortName": "Apple Inc.",
"longName": "Apple Inc.",
"currency": "USD",
"quoteSourceName": "Delayed Quote",
"currencySymbol": "$",
"fromCurrency": None,
"toCurrency": None,
"lastMarket": None,
"volume24Hr": {},
"volumeAllCurrencies": {},
"circulatingSupply": {},
"marketCap": {
"raw": 1932755861504,
"fmt": "1.93T",
"longFmt": "1,932,755,861,504.00",
},
},
}
],
"error": None,
}
}
BTC_RESPONSE = {
"quoteSummary": {
"result": [
{
"summaryDetail": {
"maxAge": 1,
"priceHint": {"raw": 2, "fmt": "2", "longFmt": "2"},
"previousClose": {"raw": 11583.987, "fmt": "11,583.99"},
"open": {"raw": 11583.987, "fmt": "11,583.99"},
"dayLow": {"raw": 11568.912, "fmt": "11,568.91"},
"dayHigh": {"raw": 11649.112, "fmt": "11,649.11"},
"regularMarketPreviousClose": {
"raw": 11583.987,
"fmt": "11,583.99",
},
"regularMarketOpen": {"raw": 11583.987, "fmt": "11,583.99"},
"regularMarketDayLow": {
"raw": 11568.912,
"fmt": "11,568.91",
},
"regularMarketDayHigh": {
"raw": 11649.112,
"fmt": "11,649.11",
},
"dividendRate": {},
"dividendYield": {},
"exDividendDate": {},
"payoutRatio": {},
"fiveYearAvgDividendYield": {},
"beta": {},
"forwardPE": {},
"volume": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232",
},
"regularMarketVolume": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232",
},
"averageVolume": {
"raw": 23604677600,
"fmt": "23.6B",
"longFmt": "23,604,677,600",
},
"averageVolume10days": {
"raw": 22428947875,
"fmt": "22.43B",
"longFmt": "22,428,947,875",
},
"averageDailyVolume10Day": {
"raw": 22428947875,
"fmt": "22.43B",
"longFmt": "22,428,947,875",
},
"bid": {},
"ask": {},
"bidSize": {},
"askSize": {},
"marketCap": {
"raw": 213843984384,
"fmt": "213.84B",
"longFmt": "213,843,984,384",
},
"yield": {},
"ytdReturn": {},
"totalAssets": {},
"expireDate": {},
"strikePrice": {},
"openInterest": {},
"fiftyTwoWeekLow": {"raw": 4106.981, "fmt": "4,106.98"},
"fiftyTwoWeekHigh": {"raw": 12045.141, "fmt": "12,045.14"},
"priceToSalesTrailing12Months": {},
"fiftyDayAverage": {"raw": 9946.923, "fmt": "9,946.92"},
"twoHundredDayAverage": {
"raw": 8833.817,
"fmt": "8,833.82",
},
"trailingAnnualDividendRate": {},
"trailingAnnualDividendYield": {},
"navPrice": {},
"currency": "USD",
"fromCurrency": "BTC",
"toCurrency": "USD=X",
"lastMarket": "CoinMarketCap",
"volume24Hr": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232.00",
},
"volumeAllCurrencies": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232.00",
},
"circulatingSupply": {
"raw": 18459200,
"fmt": "18.46M",
"longFmt": "18,459,200.00",
},
"algorithm": None,
"maxSupply": {},
"startDate": {"raw": 1367107200, "fmt": "2013-04-28"},
"tradeable": False,
},
"price": {
"maxAge": 1,
"preMarketChange": {},
"preMarketPrice": {},
"postMarketChange": {},
"postMarketPrice": {},
"regularMarketChangePercent": {
"raw": 6.0107894e-05,
"fmt": "0.01%",
},
"regularMarketChange": {"raw": 0.69628906, "fmt": "0.70"},
"regularMarketTime": 1597288594,
"priceHint": {"raw": 2, "fmt": "2", "longFmt": "2"},
"regularMarketPrice": {
"raw": 11584.684,
"fmt": "11,584.68",
},
"regularMarketDayHigh": {
"raw": 11649.112,
"fmt": "11,649.11",
},
"regularMarketDayLow": {
"raw": 11568.912,
"fmt": "11,568.91",
},
"regularMarketVolume": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232.00",
},
"averageDailyVolume10Day": {
"raw": 22428947875,
"fmt": "22.43B",
"longFmt": "22,428,947,875",
},
"averageDailyVolume3Month": {
"raw": 23604677600,
"fmt": "23.6B",
"longFmt": "23,604,677,600",
},
"regularMarketPreviousClose": {
"raw": 11583.987,
"fmt": "11,583.99",
},
"regularMarketSource": "FREE_REALTIME",
"regularMarketOpen": {"raw": 11583.987, "fmt": "11,583.99"},
"strikePrice": {},
"openInterest": {},
"exchange": "CCC",
"exchangeName": "CCC",
"exchangeDataDelayedBy": 0,
"marketState": "REGULAR",
"quoteType": "CRYPTOCURRENCY",
"symbol": "BTC-USD",
"underlyingSymbol": None,
"shortName": "<NAME>",
"longName": None,
"currency": "USD",
"quoteSourceName": "CryptoCompare",
"currencySymbol": "$",
"fromCurrency": "BTC",
"toCurrency": "USD=X",
"lastMarket": "CoinMarketCap",
"volume24Hr": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232.00",
},
"volumeAllCurrencies": {
"raw": 24981215232,
"fmt": "24.98B",
"longFmt": "24,981,215,232.00",
},
"circulatingSupply": {
"raw": 18459200,
"fmt": "18.46M",
"longFmt": "18,459,200.00",
},
"marketCap": {
"raw": 213843984384,
"fmt": "213.84B",
"longFmt": "213,843,984,384.00",
},
},
}
],
"error": None,
}
}
HBLFX_RESPONSE = {
"quoteSummary": {
"result": [
{
"summaryDetail": {
"maxAge": 1,
"priceHint": {"raw": 2, "fmt": "2", "longFmt": "2"},
"previousClose": {"raw": 15.04, "fmt": "15.04"},
"open": {},
"dayLow": {},
"dayHigh": {},
"regularMarketPreviousClose": {
"raw": 15.04,
"fmt": "15.04",
},
"regularMarketOpen": {},
"regularMarketDayLow": {},
"regularMarketDayHigh": {},
"dividendRate": {},
"dividendYield": {},
"exDividendDate": {},
"payoutRatio": {},
"fiveYearAvgDividendYield": {},
"beta": {},
"forwardPE": {},
"volume": {},
"regularMarketVolume": {},
"averageVolume": {"raw": 0, "fmt": None, "longFmt": "0"},
"averageVolume10days": {
"raw": 0,
"fmt": None,
"longFmt": "0",
},
"averageDailyVolume10Day": {
"raw": 0,
"fmt": None,
"longFmt": "0",
},
"bid": {},
"ask": {},
"bidSize": {},
"askSize": {},
"marketCap": {},
"yield": {"raw": 0.0279, "fmt": "2.79%"},
"ytdReturn": {"raw": -0.0251, "fmt": "-2.51%"},
"totalAssets": {
"raw": 13300383744,
"fmt": "13.3B",
"longFmt": "13,300,383,744",
},
"expireDate": {},
"strikePrice": {},
"openInterest": {},
"fiftyTwoWeekLow": {"raw": 11.73, "fmt": "11.73"},
"fiftyTwoWeekHigh": {"raw": 15.28, "fmt": "15.28"},
"priceToSalesTrailing12Months": {},
"fiftyDayAverage": {"raw": 14.680572, "fmt": "14.68"},
"twoHundredDayAverage": {"raw": 14.265363, "fmt": "14.27"},
"trailingAnnualDividendRate": {},
"trailingAnnualDividendYield": {},
"navPrice": {},
"currency": "USD",
"fromCurrency": None,
"toCurrency": None,
"lastMarket": None,
"volume24Hr": {},
"volumeAllCurrencies": {},
"circulatingSupply": {},
"algorithm": None,
"maxSupply": {},
"startDate": {},
"tradeable": False,
},
"price": {
"maxAge": 1,
"preMarketChange": {},
"preMarketPrice": {},
"postMarketChange": {},
"postMarketPrice": {},
"regularMarketChangePercent": {
"raw": 0.0026595744,
"fmt": "0.27%",
},
"regularMarketChange": {"raw": 0.04, "fmt": "0.04"},
"regularMarketTime": 1597276823,
"priceHint": {"raw": 2, "fmt": "2", "longFmt": "2"},
"regularMarketPrice": {"raw": 15.08, "fmt": "15.08"},
"regularMarketDayHigh": {},
"regularMarketDayLow": {},
"regularMarketVolume": {},
"averageDailyVolume10Day": {
"raw": 0,
"fmt": None,
"longFmt": "0",
},
"averageDailyVolume3Month": {
"raw": 0,
"fmt": None,
"longFmt": "0",
},
"regularMarketPreviousClose": {
"raw": 15.04,
"fmt": "15.04",
},
"regularMarketSource": "DELAYED",
"regularMarketOpen": {},
"strikePrice": {},
"openInterest": {},
"exchange": "NAS",
"exchangeName": "Nasdaq",
"exchangeDataDelayedBy": 0,
"marketState": "POSTPOST",
"quoteType": "MUTUALFUND",
"symbol": "HBLFX",
"underlyingSymbol": None,
"shortName": "The Hartford Balanced Income Fu",
"longName": "The Hartford Balanced Income Fund Class F",
"currency": "USD",
"quoteSourceName": "Delayed Quote",
"currencySymbol": "$",
"fromCurrency": None,
"toCurrency": None,
"lastMarket": None,
"volume24Hr": {},
"volumeAllCurrencies": {},
"circulatingSupply": {},
"marketCap": {},
},
}
],
"error": None,
}
}
CAD_USD_REPONSE = {
"spark": {
"result": [
{
"symbol": "CADUSD=X",
"response": [
{
"meta": {
"currency": "USD",
"symbol": "CADUSD=X",
"exchangeName": "CCY",
"instrumentType": "CURRENCY",
"firstTradeDate": 1063753200,
"regularMarketTime": 1597288995,
"gmtoffset": 3600,
"timezone": "BST",
"exchangeTimezoneName": "Europe/London",
"regularMarketPrice": 0.7555,
"chartPreviousClose": 0.7546,
"previousClose": 0.7546,
"scale": 4,
"priceHint": 4,
"currentTradingPeriod": {
"pre": {
"timezone": "BST",
"start": 1597273200,
"end": 1597273200,
"gmtoffset": 3600,
},
"regular": {
"timezone": "BST",
"start": 1597273200,
"end": 1597359540,
"gmtoffset": 3600,
},
"post": {
"timezone": "BST",
"start": 1597359540,
"end": 1597359540,
"gmtoffset": 3600,
},
},
"dataGranularity": "5m",
"range": "1m",
"validRanges": [
"1d",
"5d",
"1mo",
"3mo",
"6mo",
"1y",
"2y",
"5y",
"10y",
"ytd",
"max",
],
},
"indicators": {"quote": | |
# Repository: uktrade/market-access-python-frontend (cleaned-up extraction artifact)
from http import HTTPStatus
from django.urls import resolve, reverse
from mock import patch
from core.tests import ReportsTestCase
from reports.models import Report
from reports.views import (
NewReportBarrierAdminAreasView,
NewReportBarrierLocationAddAdminAreasView,
NewReportBarrierLocationHasAdminAreasView,
NewReportBarrierLocationView,
NewReportBarrierTradeDirectionView,
)
from tests.constants import ERROR_HTML
class LocationViewTestCase(ReportsTestCase):
"""Country without admin areas."""
def setUp(self):
    """Resolve the location-step URL once for every test in this case."""
    super().setUp()
    self.url = reverse("reports:barrier_location")
def test_country_url_resolves_to_location_view(self):
    """/reports/new/country/ must route to NewReportBarrierLocationView."""
    match = resolve("/reports/new/country/")
    assert match.func.view_class == NewReportBarrierLocationView
def test_location_view_returns_correct_html(self):
expected_title = "<title>Market Access - Add - Location of the barrier</title>"
expected_dropdown_container = (
'<select class="govuk-select" id="location" name="location">'
)
dropdown_option = '<option class="location_option"'
country_count = 195
expected_continue_btn = (
'<input type="submit" value="Continue" class="govuk-button">'
)
response = self.client.get(self.url)
html = response.content.decode("utf8")
assert HTTPStatus.OK == response.status_code
assert expected_title in html
assert expected_dropdown_container in html
options_count = html.count(dropdown_option)
assert (
country_count < options_count
), f"Expected {country_count} or more country options, got: {options_count}"
assert (
country_count + 50 > options_count
), f"Expected ~{country_count} country options, got: {options_count} - ensure there are no duplicates."
assert expected_continue_btn in html
@patch("reports.helpers.ReportFormGroup.save")
def test_location_cannot_be_empty(self, mock_save):
field_name = "location"
session_key = "draft_barrier__location_form_data"
response = self.client.post(self.url, data={field_name: ""})
saved_form_data = self.client.session.get(session_key)
html = response.content.decode("utf8")
form = response.context["form"]
assert HTTPStatus.OK == response.status_code
assert form.is_valid() is False
assert field_name in form.errors
assert ERROR_HTML.SUMMARY_HEADER in html
assert ERROR_HTML.REQUIRED_FIELD in html
assert saved_form_data is None
assert mock_save.called is False
@patch("reports.helpers.ReportFormGroup._create_barrier")
def test_location_saved_in_session(self, mock_create):
draft_barrier = self.draft_barrier(2)
mock_create.return_value = Report(draft_barrier)
field_name = "location"
session_key = "draft_barrier__location_form_data"
fiji_uuid = "d9f682ac-5d95-e211-a939-e4115bead28a"
expected_form_data = {"country": fiji_uuid, "trading_bloc": ""}
response = self.client.post(self.url, data={field_name: fiji_uuid}, follow=True)
saved_form_data = self.client.session.get(session_key)
assert HTTPStatus.OK == response.status_code
assert expected_form_data == saved_form_data
assert mock_create.called is False
@patch("reports.helpers.ReportFormGroup._create_barrier")
def test_trading_bloc_location_saved_in_session(self, mock_create):
draft_barrier = self.draft_barrier(2)
mock_create.return_value = Report(draft_barrier)
session_key = "draft_barrier__location_form_data"
expected_form_data = {"country": None, "trading_bloc": "TB00016"}
response = self.client.post(self.url, data={"location": "TB00016"}, follow=True)
saved_form_data = self.client.session.get(session_key)
assert HTTPStatus.OK == response.status_code
assert expected_form_data == saved_form_data
assert mock_create.called is False
@patch("reports.helpers.ReportFormGroup._create_barrier")
def test_saving_location_redirects_to_correct_view(self, mock_create):
draft_barrier = self.draft_barrier(2)
mock_create.return_value = Report(draft_barrier)
field_name = "location"
fiji_uuid = "d9f682ac-5d95-e211-a939-e4115bead28a"
redirect_url = reverse("reports:barrier_trade_direction")
response = self.client.post(self.url, data={field_name: fiji_uuid})
self.assertRedirects(response, redirect_url)
assert mock_create.called is False
@patch("reports.helpers.ReportFormGroup._create_barrier")
def test_saving_location_with_trading_bloc_redirects_to_correct_view(
self, mock_create
):
draft_barrier = self.draft_barrier(2)
mock_create.return_value = Report(draft_barrier)
field_name = "location"
france_uuid = "82756b9a-5d95-e211-a939-e4115bead28a"
redirect_url = reverse("reports:barrier_caused_by_trading_bloc")
response = self.client.post(self.url, data={field_name: france_uuid})
self.assertRedirects(response, redirect_url)
assert mock_create.called is False
class LocationViewCausedByTradingBlocTestCase(ReportsTestCase):
    """Tests for the "caused by trading bloc" step of the report wizard.

    setUp seeds the session with France as the selected country so the
    caused-by-trading-bloc question applies.
    """

    def setUp(self):
        super().setUp()
        self.url = reverse("reports:barrier_caused_by_trading_bloc")
        session = self.client.session
        france_uuid = "82756b9a-5d95-e211-a939-e4115bead28a"
        session["draft_barrier__location_form_data"] = {"country": france_uuid}
        session.save()

    def test_caused_by_trading_bloc_gets_saved_in_session(self):
        """Posting "yes" stores the boolean answer in the session."""
        session_key = "draft_barrier__caused_by_trading_bloc_form_data"
        expected_form_data = {"caused_by_trading_bloc": True}
        response = self.client.post(
            self.url,
            data={"caused_by_trading_bloc": "yes"},
            follow=True,
        )
        # Fixed: removed unused leftover `sess = self.client.session`
        # (assigned but never read).
        saved_form_data = self.client.session.get(session_key)
        assert HTTPStatus.OK == response.status_code
        assert expected_form_data == saved_form_data
class LocationViewHasAdminAreasTestCase(ReportsTestCase):
    """Location-step tests for a country WITH admin areas (the USA).

    Selecting such a country inserts an extra "does this affect the entire
    country?" question before the trade-direction step.
    """

    @patch("reports.helpers.ReportFormGroup.save")
    def test_saving_location_redirects_to_correct_view(self, mock_save):
        """Choosing the USA routes to the has-admin-areas question."""
        url = reverse("reports:barrier_location")
        field_name = "location"
        us_uuid = "81756b9a-5d95-e211-a939-e4115bead28a"
        redirect_url = reverse("reports:barrier_has_admin_areas")
        response = self.client.post(url, data={field_name: us_uuid})
        self.assertRedirects(response, redirect_url)
        assert mock_save.called is False

    def test_country_url_resolves_to_location_view(self):
        match = resolve("/reports/new/country/has-admin-areas/")
        assert match.func.view_class == NewReportBarrierLocationHasAdminAreasView

    def test_has_admin_areas_view_returns_correct_html(self):
        """The yes/no form renders exactly two radio buttons."""
        url = reverse("reports:barrier_has_admin_areas")
        expected_title = "<title>Market Access - Add - Location of the barrier</title>"
        expected_radio_container = '<div class="govuk-radios has-admin-areas">'
        radio_item = '<div class="govuk-radios__item">'
        expected_radio_count = 2
        expected_continue_btn = (
            '<input type="submit" value="Continue" class="govuk-button">'
        )
        response = self.client.get(url)
        html = response.content.decode("utf8")
        assert HTTPStatus.OK == response.status_code
        assert expected_title in html
        assert expected_radio_container in html
        radio_count = html.count(radio_item)
        # Fixed: compare ints with `==` rather than `is` -- identity checks
        # on integers only pass by accident (CPython small-int caching).
        assert (
            expected_radio_count == radio_count
        ), f"Expected {expected_radio_count} radio items, got: {radio_count}"
        assert expected_continue_btn in html

    @patch("reports.helpers.ReportFormGroup.save")
    def test_has_admin_areas_cannot_be_empty(self, mock_save):
        """Posting no answer re-renders the form with a required-field error."""
        url = reverse("reports:barrier_has_admin_areas")
        field_name = "has_admin_areas"
        session_key = "draft_barrier__has_admin_areas_form_data"
        response = self.client.post(url, data={field_name: ""})
        saved_form_data = self.client.session.get(session_key)
        html = response.content.decode("utf8")
        form = response.context["form"]
        assert HTTPStatus.OK == response.status_code
        assert form.is_valid() is False
        assert field_name in form.errors
        assert ERROR_HTML.SUMMARY_HEADER in html
        assert ERROR_HTML.REQUIRED_FIELD in html
        assert saved_form_data is None
        assert mock_save.called is False

    @patch("reports.helpers.ReportFormGroup._create_barrier")
    def test_has_admin_areas__option_yes_saved_in_session(self, mock_create):
        """
        Question: Does this affect the entire country?
        Answer: Yes.
        Behaviour: No need to add admin areas, the draft barrier does not get saved.
        """
        url = reverse("reports:barrier_has_admin_areas")
        draft_barrier = self.draft_barrier(2)
        mock_create.return_value = Report(draft_barrier)
        field_name = "has_admin_areas"
        session_key = "draft_barrier__has_admin_areas_form_data"
        expected_form_data = {"has_admin_areas": "1"}
        response = self.client.post(url, data={field_name: "1"}, follow=True)
        saved_form_data = self.client.session.get(session_key)
        assert HTTPStatus.OK == response.status_code
        assert expected_form_data == saved_form_data
        assert mock_create.called is False

    @patch("reports.helpers.ReportFormGroup._create_barrier")
    def test_has_admin_areas__option_yes_redirects_to_correct_view(self, mock_create):
        """
        Question: Does this affect the entire country?
        Answer: Yes.
        Behaviour: The form is valid, the user gets redirected to the next step (trade direction).
        """
        url = reverse("reports:barrier_has_admin_areas")
        draft_barrier = self.draft_barrier(2)
        mock_create.return_value = Report(draft_barrier)
        field_name = "has_admin_areas"
        redirect_url = reverse("reports:barrier_trade_direction")
        response = self.client.post(url, data={field_name: "1"})
        self.assertRedirects(response, redirect_url)
        assert mock_create.called is False

    @patch("reports.helpers.ReportFormGroup.save")
    def test_has_admin_areas__option_no_saved_in_session(self, mock_save):
        """
        Question: Does this affect the entire country?
        Answer: No.
        Behaviour: User needs to add admin areas, the barrier is not saved.
        """
        url = reverse("reports:barrier_has_admin_areas")
        field_name = "has_admin_areas"
        session_key = "draft_barrier__has_admin_areas_form_data"
        expected_form_data = {"has_admin_areas": "2"}
        response = self.client.post(url, data={field_name: "2"}, follow=True)
        saved_form_data = self.client.session.get(session_key)
        assert HTTPStatus.OK == response.status_code
        assert expected_form_data == saved_form_data
        assert mock_save.called is False

    @patch("reports.helpers.ReportFormGroup.save")
    def test_has_admin_areas__option_no_redirects_to_correct_view(self, mock_save):
        """
        Question: Does this affect the entire country?
        Answer: No.
        Behaviour: User needs to add admin areas.
        User gets redirected to add admin area view, draft barrier is not created.
        """
        url = reverse("reports:barrier_has_admin_areas")
        field_name = "has_admin_areas"
        redirect_url = reverse("reports:barrier_add_admin_areas")
        response = self.client.post(url, data={field_name: "2"})
        self.assertRedirects(response, redirect_url)
        assert mock_save.called is False
class LocationViewAddAdminAreasTestCase(ReportsTestCase):
    """Tests for the "add admin areas" form.

    The session is pre-seeded with Brazil as the selected country so
    admin-area options are available.
    """

    def setUp(self):
        super().setUp()
        self.url = reverse("reports:barrier_add_admin_areas")
        sess = self.client.session
        sess["draft_barrier__location_form_data"] = {
            "country": "b05f66a0-5d95-e211-a939-e4115bead28a"
        }
        sess.save()

    def test_add_admin_areas_url_resolves_to_correct_view(self):
        resolved = resolve("/reports/new/country/admin-areas/add/")
        assert resolved.func.view_class == NewReportBarrierLocationAddAdminAreasView

    def test_add_admin_areas_view_returns_correct_html(self):
        """Page shows the admin-area dropdown and an "Add admin area" button."""
        title_tag = "<title>Market Access - Add - Location of the barrier</title>"
        select_tag = (
            '<select class="govuk-select govuk-!-width-full" id="admin_areas" '
            'name="admin_areas">'
        )
        add_button = (
            '<input type="submit" value="Add admin area" class="govuk-button">'
        )
        response = self.client.get(self.url)
        page = response.content.decode("utf8")
        assert response.status_code == HTTPStatus.OK
        assert title_tag in page
        assert select_tag in page
        options_count = page.count('<option class="admin_area_option"')
        assert (
            1 <= options_count
        ), f"Expected at least one admin area option, got: {options_count}"
        assert add_button in page

    def test_admin_area_cannot_be_empty(self):
        """An empty selection fails validation and saves nothing."""
        session_key = "draft_barrier__admin_areas_form_data"
        response = self.client.post(self.url, data={"admin_areas": ""})
        page = response.content.decode("utf8")
        bound_form = response.context["form"]
        assert response.status_code == HTTPStatus.OK
        assert bound_form.is_valid() is False
        assert "admin_areas" in bound_form.errors
        assert ERROR_HTML.SUMMARY_HEADER in page
        assert ERROR_HTML.REQUIRED_FIELD in page
        assert self.client.session.get(session_key) is None

    @patch("reports.helpers.ReportFormGroup._create_barrier")
    def test_adding_admin_area_saved_in_session(self, mock_create):
        """A chosen admin area lands in the session; no barrier is created."""
        session_key = "draft_barrier__admin_areas_form_data"
        acre_uuid = "b5d03d97-fef5-4da6-9117-98a4d633b581"
        response = self.client.post(
            self.url, data={"admin_areas": acre_uuid}, follow=True
        )
        assert response.status_code == HTTPStatus.OK
        assert self.client.session.get(session_key) == {"admin_areas": acre_uuid}
        assert mock_create.called is False

    @patch("reports.helpers.ReportFormGroup._create_barrier")
    def test_adding_admin_area_redirects_to_correct_view(self, mock_create):
        """After adding an area the user lands on the summary page."""
        acre_uuid = "b5d03d97-fef5-4da6-9117-98a4d633b581"
        response = self.client.post(self.url, data={"admin_areas": acre_uuid})
        self.assertRedirects(response, reverse("reports:barrier_admin_areas"))
        assert mock_create.called is False
class LocationViewAdminAreasTestCase(ReportsTestCase):
    """Tests for the selected-admin-areas summary page.

    setUp seeds the session with Brazil so admin areas are applicable.
    """

    def setUp(self):
        super().setUp()
        self.url = reverse("reports:barrier_admin_areas")
        brazil_uuid = "b05f66a0-5d95-e211-a939-e4115bead28a"
        session = self.client.session
        session["draft_barrier__location_form_data"] = {"country": brazil_uuid}
        session.save()

    def test_admin_areas_url_resolves_to_correct_view(self):
        match = resolve("/reports/new/country/admin-areas/")
        assert match.func.view_class == NewReportBarrierAdminAreasView

    def test_admin_areas_view_loads_correct_template(self):
        response = self.client.get(self.url)
        assert HTTPStatus.OK == response.status_code
        self.assertTemplateUsed(
            response, "reports/new_report_barrier_location_admin_areas.html"
        )

    def test_admin_areas_view_returns_correct_html(self):
        """With nothing selected yet, the selection list renders zero items."""
        expected_title = "<title>Market Access - Add - Location of the barrier</title>"
        expected_header_text = (
            '<h3 class="selection-list__heading">Selected admin areas</h3>'
        )
        admin_area_item = '<li class="selection-list__list__item">'
        expected_continue_btn = (
            '<input type="submit" value="Continue" class="govuk-button">'
        )
        add_another_btn = (
            '<a href="/reports/new/country/admin-areas/add/" '
            'class="govuk-button button--secondary '
            'selection-list__add-button">Add another admin area</a>'
        )
        response = self.client.get(self.url)
        html = response.content.decode("utf8")
        assert HTTPStatus.OK == response.status_code
        assert expected_title in html
        assert expected_header_text in html
        assert add_another_btn in html
        options_count = html.count(admin_area_item)
        assert 0 == options_count, f"Expected 0 admin areas, got: {options_count}"
        assert expected_continue_btn in html

    def test_admin_areas_view_displays_selected_admin_areas(self):
        # set up the session so 2 admin areas were already added
        admin_area_item = '<li class="selection-list__list__item">'
        expected_admin_areas_count = 2
        session = self.client.session
        acre_uuid = "b5d03d97-fef5-4da6-9117-98a4d633b581"
        bahia_uuid = "5b76e167-a548-4aca-8d49-39c19e646425"
        # Selected areas are stored as a comma-separated string of UUIDs.
        session["draft_barrier__selected_admin_areas"] = f"{acre_uuid}, {bahia_uuid}"
        session.save()
        response = self.client.get(self.url)
        html = response.content.decode("utf8")
        options_count = html.count(admin_area_item)
        assert (
            expected_admin_areas_count == options_count
        ), f"Expected {expected_admin_areas_count} admin areas, got: {options_count}"

    def test_remove_admin_area(self):
        """Posting to the remove endpoint drops that area from the session."""
        remove_url = reverse("reports:barrier_remove_admin_areas")
        session_key = "draft_barrier__selected_admin_areas"
        acre_uuid = "b5d03d97-fef5-4da6-9117-98a4d633b581"
        bahia_uuid = "5b76e167-a548-4aca-8d49-39c19e646425"
        area_to_remove = acre_uuid
        session = self.client.session
        session[session_key] = f"{acre_uuid}, {bahia_uuid}"
        session.save()
        response = self.client.post(
            remove_url, data={"admin_area": area_to_remove}, follow=True
        )
        selected_admin_areas = self.client.session.get(session_key)
        assert HTTPStatus.OK == response.status_code
        # Only the other area remains.
        assert bahia_uuid == selected_admin_areas

    @patch("reports.helpers.ReportFormGroup._create_barrier")
    def test_button_continue_redirects_to_correct_view(self, mock_create):
        """
        Clicking on `Continue` button should proceed to trade directions without saving the barrier
        """
        draft_barrier = self.draft_barrier(2)
        mock_create.return_value = Report(draft_barrier)
        redirect_url = reverse("reports:barrier_trade_direction")
        response = self.client.post(self.url, data={})
        self.assertRedirects(response, redirect_url)
        assert mock_create.called is False
class TradeDirectionViewTestCase(ReportsTestCase):
def | |
<gh_stars>0
# Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.dell import dell_storagecenter_iscsi
import mock
import uuid
LOG = logging.getLogger(__name__)
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanISCSIDriverTestCase(test.TestCase):
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by <NAME>',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
ISCSI_PROPERTIES = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_luns': [1],
'target_portals': [u'192.168.0.21:3260']}
ISCSI_PROPERTIES_EMPTY = {'access_mode': 'rw',
'target_discovered': False,
'target_iqns': [],
'target_luns': [],
'target_portals': []}
    def setUp(self):
        """Build a mocked driver configuration and the driver under test."""
        super(DellSCSanISCSIDriverTestCase, self).setUp()
        # configuration is a mock. A mock is pretty much a blank
        # slate. I believe mock's done in setup are not happy time
        # mocks. So we just do a few things like driver config here.
        self.configuration = mock.Mock()
        self.configuration.san_is_local = False
        self.configuration.san_ip = "192.168.0.1"
        self.configuration.san_login = "admin"
        # NOTE(review): "<PASSWORD>" looks like an anonymisation placeholder;
        # the literal value is irrelevant to these tests.
        self.configuration.san_password = "<PASSWORD>"
        self.configuration.dell_sc_ssn = 12345
        self.configuration.dell_sc_server_folder = 'opnstktst'
        self.configuration.dell_sc_volume_folder = 'opnstktst'
        self.configuration.dell_sc_api_port = 3033
        self.configuration.iscsi_ip_address = '192.168.1.1'
        self.configuration.iscsi_port = 3260
        self._context = context.get_admin_context()
        self.driver = dell_storagecenter_iscsi.DellStorageCenterISCSIDriver(
            configuration=self.configuration)
        self.driver.do_setup(None)
        # Pre-seed driver stats so stats-reporting code has data to return.
        self.driver._stats = {'QoS_support': False,
                              'volume_backend_name': 'dell-1',
                              'free_capacity_gb': 12123,
                              'driver_version': '1.0.1',
                              'total_capacity_gb': 12388,
                              'reserved_percentage': 0,
                              'vendor_name': 'Dell',
                              'storage_protocol': 'iSCSI'}
        # Fresh volume name per test run to avoid cross-test collisions.
        self.volid = str(uuid.uuid4())
        self.volume_name = "volume" + self.volid
        self.connector = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost'}
        # Same connector with multipath requested.
        self.connector_multipath = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost',
            'multipath': True}
        self.access_record_output = [
            "ID Initiator Ipaddress AuthMethod UserName Apply-To",
            "--- --------------- ------------- ---------- ---------- --------",
            "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both",
            " 7dab76162"]
        self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001'
        # NOTE(review): key below is misspelled ('target_discoverd' rather
        # than 'target_discovered'). Presumably an unused leftover fixture --
        # confirm before renaming, as that would change this dict's shape.
        self.properties = {
            'target_discoverd': True,
            'target_portal': '%s:3260'
            % self.driver.configuration.dell_sc_iscsi_ip,
            'target_iqn': self.fake_iqn,
            'volume_id': 1}
        self._model_update = {
            'provider_location': "%s:3260,1 %s 0"
            % (self.driver.configuration.dell_sc_iscsi_ip,
               self.fake_iqn)
            # ,
            # 'provider_auth': 'CHAP %s %s' % (
            #     self.configuration.eqlx_chap_login,
            #     self.configuration.eqlx_chap_password)
        }
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume(self,
                           mock_find_sc,
                           mock_create_volume,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """create_volume delegates to the API with name/size/ssn/folder."""
        # Mock args arrive bottom-up: method decorators first (find_sc,
        # create_volume), then the three class-level patches.
        volume = {'id': self.volume_name, 'size': 1}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   12345,
                                                   u'opnstktst')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume_failure(self,
                                   mock_find_sc,
                                   mock_create_volume,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """A None result from the API surfaces as VolumeBackendAPIException."""
        volume = {'id': self.volume_name, 'size': 1}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, volume)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_delete_volume(self,
                           mock_find_sc,
                           mock_delete_volume,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """delete_volume delegates to the API with (ssn, volume name)."""
        volume = {'id': self.volume_name, 'size': 1}
        self.driver.delete_volume(volume)
        mock_delete_volume.assert_called_once_with(12345, self.volume_name)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_delete_volume_failure(self,
                                   mock_find_sc,
                                   mock_delete_volume,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """A False delete result surfaces as VolumeIsBusy."""
        volume = {'id': self.volume_name, 'size': 1}
        self.assertRaises(exception.VolumeIsBusy,
                          self.driver.delete_volume,
                          volume)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS[0])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection(self,
                                   mock_find_iscsi_props,
                                   mock_map_volume,
                                   mock_find_volume,
                                   mock_create_server,
                                   mock_find_server,
                                   mock_find_sc,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """Happy path: server auto-created, single-path data returned."""
        volume = {'id': self.volume_name}
        connector = self.connector
        data = self.driver.initialize_connection(volume, connector)
        self.assertEqual(data['driver_volume_type'], 'iscsi')
        # verify find_volume has been called and that is has been called twice
        mock_find_volume.assert_any_call(12345, self.volume_name)
        assert mock_find_volume.call_count == 2
        # Non-multipath connector: scalar target_iqn/lun/portal keys.
        expected = {'data':
                    {'access_mode': 'rw',
                     'target_discovered': False,
                     'target_iqn':
                     u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                     'target_lun': 1,
                     'target_portal': u'192.168.0.21:3260'},
                    'driver_volume_type': 'iscsi'}
        self.assertEqual(expected, data, 'Unexpected return value')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS[0])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection_multi_path(self,
                                              mock_find_iscsi_props,
                                              mock_map_volume,
                                              mock_find_volume,
                                              mock_create_server,
                                              mock_find_server,
                                              mock_find_sc,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        # Test case where connection is multipath
        volume = {'id': self.volume_name}
        connector = self.connector_multipath
        data = self.driver.initialize_connection(volume, connector)
        self.assertEqual(data['driver_volume_type'], 'iscsi')
        # verify find_volume has been called and that is has been called twice
        mock_find_volume.assert_any_call(12345, self.volume_name)
        assert mock_find_volume.call_count == 2
        # Multipath connector: list-valued target_iqns/luns/portals keys.
        expected = {'data':
                    {'access_mode': 'rw',
                     'target_discovered': False,
                     'target_iqns':
                     [u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
                     'target_luns': [1],
                     'target_portals': [u'192.168.0.21:3260']},
                    'driver_volume_type': 'iscsi'}
        self.assertEqual(expected, data, 'Unexpected return value')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES_EMPTY)
    def test_initialize_connection_no_iqn(self,
                                          mock_find_iscsi_properties,
                                          mock_map_volume,
                                          mock_find_volume,
                                          mock_find_server,
                                          mock_find_sc,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """Empty iSCSI properties (no target IQNs) raise a backend error."""
        volume = {'id': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES_EMPTY)
    def test_initialize_connection_no_server(self,
                                             mock_find_iscsi_properties,
                                             mock_map_volume,
                                             mock_find_volume,
                                             mock_create_server,
                                             mock_find_server,
                                             mock_find_sc,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        """Server lookup AND creation both failing raise a backend error."""
        volume = {'id': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES_EMPTY)
    def test_initialize_connection_vol_not_found(self,
                                                 mock_find_iscsi_properties,
                                                 mock_map_volume,
                                                 mock_find_volume,
                                                 mock_find_server,
                                                 mock_find_sc,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """find_volume returning None raises a backend error."""
        # NOTE(review): this dict uses 'name' where sibling tests use 'id';
        # presumably irrelevant here since find_volume is mocked -- confirm.
        volume = {'name': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection_map_vol_fail(self,
                                                mock_find_iscsi_props,
                                                mock_map_volume,
                                                mock_find_volume,
                                                mock_create_server,
                                                mock_find_server,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        # Test case where map_volume returns None (no mappings)
        volume = {'id': self.volume_name}
        connector = self.connector
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection(self,
                                  mock_unmap_volume,
                                  mock_find_volume,
                                  mock_find_server,
                                  mock_find_sc,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """terminate_connection unmaps the found volume from the server."""
        volume = {'id': self.volume_name}
        connector = self.connector
        self.driver.terminate_connection(volume, connector)
        mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection_no_server(self,
                                            mock_unmap_volume,
                                            mock_find_volume,
                                            mock_find_server,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """A missing server on disconnect raises a backend error."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
def test_terminate_connection_no_volume(self,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name}
connector = {'initiator': ''}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=False)
def test_terminate_connection_failure(self,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'name': self.volume_name}
connector = {'initiator': ''}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value='fake')
def test_create_snapshot(self,
mock_create_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.driver.create_snapshot(snapshot)
self.assertEqual('available', snapshot['status'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=None)
def test_create_snapshot_no_volume(self,
mock_create_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
snapshot)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=None)
def test_create_snapshot_failure(self,
mock_create_replay,
mock_find_volume,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
snapshot = {'volume_id': self.volume_name,
'id': | |
from threading import Thread
from spock.client import Client
from spock.plugins import DefaultPlugins
from spock.plugins.core.event import EventPlugin
from spock.plugins.helpers.clientinfo import ClientInfoPlugin
from spock.plugins.helpers.move import MovementPlugin
from spock.plugins.helpers.world import WorldPlugin
from micropsi_core.world.world import World
from micropsi_core.world.worldadapter import WorldAdapter
from micropsi_core.world.minecraft.spockplugin import MicropsiPlugin
from micropsi_core.world.minecraft.minecraft_graph_locomotion import MinecraftGraphLocomotion
class Minecraft(World):
    """
    Minecraft world which talks to a Minecraft server through a spock client
    running in a background communication thread.

    mandatory: list of world adapters that are supported
    """
    supported_worldadapters = [
        'MinecraftWorldAdapter',
        'MinecraftBraitenberg',
        'MinecraftGraphLocomotion'
    ]

    # frontend asset configuration
    assets = {
        'template': 'minecraft/minecraft.tpl',
        'js': 'minecraft/minecraft.js',
        'x': 256,
        'y': 256,
    }

    # thread and spock only exist once (shared by all Minecraft world instances)
    instances = {
        'spock': None,
        'thread': None
    }

    def __init__(self, filename, world_type="Minecraft", name="", owner="", engine=None, uid=None, version=1):
        """
        Initializes spock client including MicropsiPlugin, starts minecraft communication thread.
        """
        from micropsi_core.runtime import add_signal_handler

        # do spock things first, then initialize micropsi world because the latter requires self.spockplugin

        # register all necessary spock plugins
        # DefaultPlugins contain EventPlugin, NetPlugin, TimerPlugin, AuthPlugin,
        # ThreadPoolPlugin, StartPlugin and KeepalivePlugin
        # BUGFIX: copy the list before extending it. Appending to the imported
        # DefaultPlugins list directly mutated the shared module-level default,
        # adding duplicate plugins on every instantiation.
        plugins = list(DefaultPlugins)
        plugins.append(ClientInfoPlugin)
        plugins.append(MovementPlugin)
        plugins.append(WorldPlugin)
        plugins.append(MicropsiPlugin)

        # get spock configs
        settings = self.get_config()

        # add plugin-specific settings
        settings['plugins'] = plugins
        settings['plugin_settings'] = {
            MicropsiPlugin: {
                "micropsi_world": self
            },
            EventPlugin: {
                "killsignals": False
            }
        }

        # instantiate spock client if not yet done, which in turn instantiates its plugins
        # ( MicropsiPlugin sets self.spockplugin upon instantiation )
        if self.instances['spock'] is None:
            self.instances['spock'] = Client(plugins=plugins, settings=settings)

        if self.instances['thread'] is None:
            # start new thread for minecraft comm" which starts spock client
            thread = Thread(
                target=self.instances['spock'].start,
                args=(settings['server'], settings['port']))
            # Note: client.start() is attached in StartPlugin w/ setattr(self.client, 'start', self.start)
            thread.start()
            self.instances['thread'] = thread

        add_signal_handler(self.kill_minecraft_thread)

        # once MicropsiPlugin is instantiated and running, initialize micropsi world
        # NOTE(review): the "engine" argument is accepted but not forwarded to
        # World.__init__ -- confirm whether it should be.
        World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version)

        # make data accessible to frontend
        self.data['assets'] = self.assets

        # copied from jonas' code as is
        self.current_step = 0
        self.first_step = True
        self.chat_ping_counter = 0
        self.the_image = None

    def get_config(self):
        """
        Collect config settings required by spock /minecraft as specified in
        config.ini.
        """
        from configuration import config as cfg
        settings = {
            'username': cfg['minecraft']['username'],
            'password': cfg['minecraft']['password'],
            # config values are strings; only the exact string 'True' enables it
            'authenticated': cfg['minecraft']['authenticated'] == 'True',
            'bufsize': 4096,  # size of socket buffer
            'sock_quit': True,  # stop bot on socket error or hangup
            'sess_quit': True,  # stop bot on failed session login
            'thread_workers': 5,  # number of workers in the thread pool
            'packet_trace': False,
            'mc_username': "test",
            # NOTE(review): looks like a redacted placeholder credential -- confirm.
            'mc_password': "<PASSWORD>",
            'server': cfg['minecraft']['server'],
            'port': int(cfg['minecraft']['port'])
        }
        return settings

    def kill_minecraft_thread(self, *args):
        """
        Shut down the spock event loop and wait for the communication
        thread to terminate. Registered as a signal handler in __init__.
        """
        self.spockplugin.event.kill()
        self.instances['thread'].join()
        # self.spockplugin.threadpool.shutdown(False)
class Minecraft2D(Minecraft):
""" mandatory: list of world adapters that are supported"""
supported_worldadapters = [
'MinecraftWorldAdapter',
'MinecraftGraphLocomotion'
]
assets = {
'template': 'minecraft/minecraft.tpl',
'js': 'minecraft/minecraft2d.js',
}
def step(self):
"""
Is called on every world step to advance the simulation.
"""
World.step(self)
# a 2D perspective projection
self.get_perspective_projection(self.spockplugin.clientinfo.position)
    def get_perspective_projection(self, agent_info):
        """
        Ray-trace a 2D perspective projection of the Minecraft world as seen
        from the agent's position along its yaw and pitch.

        For every pixel of the image plane a ray is cast from the agent until
        it hits a non-air block or ``max_dist`` steps are exhausted. The
        resulting flat tuple of alternating ``(block_name, distance)`` values
        is stored in ``self.data['projection']``.

        agent_info: mapping with keys 'x', 'y', 'z', 'yaw', 'pitch'
        (yaw/pitch in degrees) -- as provided by spock's clientinfo.
        """
        from math import sqrt
        from micropsi_core.world.minecraft import structs
        # specs
        focal_length = 1  # distance of image plane from projective point
        max_dist = 150  # maximum distance for raytracing
        resolution = 4  # camera resolution for a specific visual field
        im_width = 32  # width of projection /image plane
        im_height = 16  # height of projection /image plane
        cam_width = 1.  # width of viewport /camera coords
        cam_height = 1.  # height of viewport /camera coords
        # save parameters for frontend
        self.assets['width'] = im_width * resolution
        self.assets['height'] = im_height * resolution
        # get agent's position, yaw, and pitch
        position = (int(agent_info['x']), int(agent_info['y']), int(agent_info['z']))
        # NOTE: "%" binds tighter than "-", so this computes 360 - (yaw % 360).
        yaw = 360 - float(agent_info['yaw']) % 360  # given in degrees
        # check which yaw value is straight forward, potentially it's 90, ie. mc yaw + 90
        pitch = float(agent_info['pitch'])  # given in degrees
        # "Yaw is measured in degrees, and does not follow classical trigonometry rules. The unit circle of yaw on
        # the XZ-plane starts at (0, 1) and turns counterclockwise, with 90 at (-1, 0), 180 at (0,-1) and 270 at
        # (1, 0). Additionally, yaw is not clamped to between 0 and 360 degrees; any number is valid, including
        # negative numbers and numbers greater than 360."
        # "Pitch is measured in degrees, where 0 is looking straight ahead,
        # -90 is looking straight up, and 90 is looking straight down. "
        # perspective of particular yaw values
        # i get the impression that while the agent turns to the right at eg. 90 degrees yaw
        # the proejection i get is turned left #doublecheck!
        # 0 -
        # 90 -
        # 180 -
        # 270 -
        # perspective of particular pitch values
        # 0 - straight ahead
        # 90 - straight down
        # 180 - upside down straight backwards
        # 270 - straight up
        # span viewport
        tick_w = cam_width / im_width / resolution
        tick_h = cam_height / im_height / resolution
        # the horizontal plane is split half-half, the vertical plane is shifted upwards wrt the agent's position
        h_line = [i for i in self.frange(position[0] - 0.5 * cam_width, position[0] + 0.5 * cam_width, tick_w)]
        v_line = [i for i in self.frange(position[1] - 0.05 * cam_height, position[1] + 0.95 * cam_height, tick_h)]
        # compute pixel values of image plane
        projection = tuple()
        x0, y0, z0 = position  # agent's position aka projective point
        zi = z0 + focal_length  # image plane sits focal_length in front along z
        for xi in reversed(h_line):
            for yi in reversed(v_line):
                distance = 0  # just a counter
                block_type = 0
                xb, yb, zb = xi, yi, zi
                # compute difference vector between projective point and image point
                diff = (xi - x0, yi - y0, zi - z0)
                # normalize difference vector
                magnitude = sqrt(diff[0] ** 2 + diff[1] ** 2 + diff[2] ** 2)
                if magnitude == 0.:
                    magnitude = 1.
                norm = (diff[0] / magnitude, diff[1] / magnitude, diff[2] / magnitude)
                # rotate norm vector (pitch about x, then yaw about y)
                norm = self.rotate_around_x_axis(norm, pitch)
                norm = self.rotate_around_y_axis(norm, yaw)
                # rotate diff vector
                diff = self.rotate_around_x_axis(diff, pitch)
                diff = self.rotate_around_y_axis(diff, yaw)
                # add diff to projection point aka agent's position
                xb, yb, zb = x0 + diff[0], y0 + diff[1], z0 + diff[2]
                while block_type <= 0:  # which is air
                    # check block type of next distance point along ray
                    # aka add normalized difference vector to image point
                    xb = xb + norm[0]
                    yb = yb + norm[1]
                    zb = zb + norm[2]
                    block_type = self.spockplugin.get_block_type(
                        int(xb),
                        int(yb),
                        int(zb),
                    )
                    distance += 1
                    if distance >= max_dist:
                        break
                # add block name, distance to projection plane
                # hm, if block_type unknown, expect an exception
                # NOTE(review): if the lookup fails, block_name from the
                # previous pixel is silently reused (NameError on the very
                # first pixel) -- the append below probably belongs inside
                # the if. Confirm before changing.
                if structs.block_names.get(str(block_type)):
                    block_name = structs.block_names[str(block_type)]
                projection += (block_name, distance)
        self.data['projection'] = projection
        # problems:
        # depending on the depth to compute there's considerable perceptual delay
        # things that aren't part of the world cannot be seen from the world data
        # ideas:
        # increase number of rays per pixel with increasing distance
        # make a non-linear image plane, eg. with higher resolution in the middle
def rotate_around_x_axis(self, pos, angle):
""" Rotate a 3D point around the x-axis given a specific angle. """
from math import radians, cos, sin
# convert angle in degrees to radians
theta = radians(angle)
# rotate vector
x = pos[0]
y = pos[1] * cos(theta) - pos[2] * sin(theta)
z = pos[1] * sin(theta) + pos[2] * cos(theta)
return (x, y, z)
def rotate_around_y_axis(self, pos, angle):
""" Rotate a 3D point around the y-axis given a specific angle. """
from math import radians, cos, sin
# convert angle in degrees to radians
theta = radians(angle)
# rotate vector
x = pos[0] * cos(theta) + pos[2] * sin(theta)
y = pos[1]
z = - pos[0] * sin(theta) + pos[2] * cos(theta)
return (x, y, z)
def rotate_around_z_axis(self, pos, angle):
""" Rotate a 3D point around the z-axis given a specific angle. """
from math import radians, cos, sin
# convert angle in degrees to radians
theta = radians(angle)
# rotate vector
x = pos[0] * cos(theta) - pos[1] * sin(theta)
y = pos[0] * sin(theta) + pos[1] * cos(theta)
z = pos[2]
return (x, y, z)
def frange(self, start, end, step):
"""
Range for | |
_get_names(obj, json_paths)
else:
names = []
for path in json_paths:
names += _get_names(obj, path)
# Make the list contain unique names. It keeps the original order in Python 3.6+
# because dicts are ordered. We use the same string for both the name and the value.
pairs = [(name, name) for name in dict.fromkeys(names).keys()]
return Enum(value=class_name, names=pairs, module=module, qualname=qualname, type=base_class) # type: ignore
def _get_names(obj: typing.Dict, path: str) -> typing.List:
    """Return all values in ``obj`` matched by the JSON path expression ``path``."""

    expression = jsonpath_ng.parse(path)
    return [found.value for found in expression.find(obj)]
# This allows other modules to register additional immutable values and types.
# We are doing it this way to overcome issues with import cycles.
# Registered values are matched by identity ("is") and registered types via
# "isinstance" (see "make_immutable_copy" and "check_immutable" below).
additional_immutable_values: typing.Tuple[typing.Any, ...] = ()
additional_immutable_types: typing.Tuple[type, ...] = ()
def make_immutable_copy(obj: typing.Any) -> typing.Any:
    """
    Converts a given ``obj`` into an immutable copy of it, if possible.

    Parameters
    ----------
    obj:
        Object to convert.

    Returns
    -------
    An immutable copy of ``obj``.
    """

    # Values registered by other modules are considered immutable as-is.
    if any(obj is immutable_value for immutable_value in additional_immutable_values):
        return obj

    if isinstance(obj, numpy.matrix):
        # A matrix cannot be iterated segment by segment: slicing always yields
        # another matrix (2D structure) and never rows or columns. Converting it
        # to an array makes such iteration possible.
        obj = numpy.array(obj)

    # NOTE: the order of the checks below matters. "str" is among the known
    # immutable types, so strings are returned here and never picked apart as
    # sequences further down.
    if isinstance(obj, KNOWN_IMMUTABLE_TYPES):
        return obj
    if additional_immutable_types and isinstance(obj, additional_immutable_types):
        return obj
    if is_type(obj):
        # Assume all types are immutable.
        return obj
    if isinstance(obj, typing.Mapping):
        # Order of the mapping is always preserved. "MappingProxyType" would not
        # be enough here because values have to be converted recursively as well.
        converted_items = ((make_immutable_copy(key), make_immutable_copy(value)) for key, value in obj.items())
        return frozendict.FrozenOrderedDict(converted_items)
    if isinstance(obj, typing.Set):
        return frozenset(make_immutable_copy(element) for element in obj)
    if isinstance(obj, tuple):
        # Constructed through the original type to preserve named tuples.
        return type(obj)(make_immutable_copy(element) for element in obj)
    if isinstance(obj, pandas.DataFrame):
        return tuple(make_immutable_copy(row) for row in obj.itertuples(index=False, name=None))
    if isinstance(obj, (typing.Sequence, numpy.ndarray)):
        return tuple(make_immutable_copy(element) for element in obj)

    raise TypeError("{obj} is not known to be immutable.".format(obj=obj))
def check_immutable(obj: typing.Any) -> None:
    """
    Checks that ``obj`` is immutable. Raises an exception if this is not true.

    Parameters
    ----------
    obj:
        Object to check.

    Raises
    ------
    TypeError
        If ``obj`` (or any value contained in it) is not known to be immutable.
    """

    obj_type = type(obj)

    # First check common cases.
    # Exact-type comparisons below are fast paths; the broader "isinstance"
    # checks further down catch subclasses of the same types.
    if any(obj is immutable_value for immutable_value in additional_immutable_values):
        return
    if obj_type in KNOWN_IMMUTABLE_TYPES:
        return
    if obj_type is frozendict.FrozenOrderedDict:
        # Both keys and values must themselves be immutable.
        for k, v in obj.items():
            check_immutable(k)
            check_immutable(v)
        return
    if obj_type is tuple:
        for o in obj:
            check_immutable(o)
        return

    if isinstance(obj, KNOWN_IMMUTABLE_TYPES):
        return
    if additional_immutable_types and isinstance(obj, additional_immutable_types):
        return
    if isinstance(obj, tuple):
        # To support named tuples.
        for o in obj:
            check_immutable(o)
        return
    if is_type(obj):
        # Assume all types are immutable.
        return
    if obj_type is frozenset:
        # Elements of a frozenset must themselves be immutable.
        for o in obj:
            check_immutable(o)
        return

    raise TypeError("{obj} is not known to be immutable.".format(obj=obj))
class Metaclass(custom_inherit._DocInheritorBase):
    """
    A metaclass which makes sure docstrings are inherited.

    It knows how to merge numpy-style docstrings and merge parent sections with
    child sections. For example, then it is not necessary to repeat documentation
    for parameters if they have not changed.
    """

    @staticmethod
    def class_doc_inherit(prnt_doc: str = None, child_doc: str = None) -> typing.Optional[str]:
        # Merge the parent's and the child's class docstrings, numpy style.
        merge_numpy_docs = custom_inherit.store['numpy']
        return merge_numpy_docs(prnt_doc, child_doc)

    @staticmethod
    def attr_doc_inherit(prnt_doc: str = None, child_doc: str = None) -> typing.Optional[str]:
        # Merge the parent's and the child's attribute docstrings, numpy style.
        merge_numpy_docs = custom_inherit.store['numpy']
        return merge_numpy_docs(prnt_doc, child_doc)
# Combines "abc.ABCMeta" with "Metaclass" so abstract classes get both
# abstract-method enforcement and docstring inheritance.
class AbstractMetaclass(abc.ABCMeta, Metaclass):
    """
    A metaclass which makes sure docstrings are inherited. For use with abstract classes.
    """
# NOTE(review): "typing.GenericMeta" was removed in Python 3.7 -- this class
# appears to require Python 3.6; confirm the supported Python version.
class GenericMetaclass(typing.GenericMeta, Metaclass):
    """
    A metaclass which makes sure docstrings are inherited. For use with generic classes (which are also abstract).
    """
class RefResolverNoRemote(validators.RefResolver):
    """A JSON schema reference resolver with remote resolution disabled."""

    def resolve_remote(self, uri: str) -> typing.Any:
        # All schemas are expected to be provided locally; dereferencing a
        # remote URI is always an error.
        raise exceptions.NotSupportedError("Remote resolving disabled: {uri}".format(uri=uri))
def enum_validator(validator, enums, instance, schema):  # type: ignore
    """Validate ``instance`` against a JSON schema "enum", using the name of a Python enumeration value."""

    value = instance.name if isinstance(instance, Enum) else instance
    yield from validators.Draft7Validator.VALIDATORS['enum'](validator, enums, value, schema)
def json_schema_is_string(checker: jsonschema.TypeChecker, instance: typing.Any) -> bool:
    """A "string" type check which also accepts Python enumeration values."""

    return isinstance(instance, Enum) or validators.Draft7Validator.TYPE_CHECKER.is_type(instance, 'string')
def json_schema_is_object(checker: jsonschema.TypeChecker, instance: typing.Any) -> bool:
    """An "object" type check which also accepts frozen dicts."""

    return isinstance(instance, (frozendict.frozendict, frozendict.FrozenOrderedDict)) \
        or validators.Draft7Validator.TYPE_CHECKER.is_type(instance, 'object')
def json_schema_is_array(checker: jsonschema.TypeChecker, instance: typing.Any) -> bool:
    """An "array" type check which also accepts tuples and sets."""

    return isinstance(instance, (tuple, set)) \
        or validators.Draft7Validator.TYPE_CHECKER.is_type(instance, 'array')
# A draft 7 type checker extended to accept Python enumerations as strings,
# frozen dicts as objects, and tuples/sets as arrays (see the
# "json_schema_is_*" functions above).
JsonSchemaTypeChecker = validators.Draft7Validator.TYPE_CHECKER.redefine_many({
    'string': json_schema_is_string,
    'object': json_schema_is_object,
    'array': json_schema_is_array,
})
# JSON schema validator with the following extension:
#
# * If a value is an instance of Python enumeration, its name is checked against JSON
#   schema enumeration, instead of the value itself. When converting to a proper JSON
#   these values should be enumeration's name.
#
# It also uses "JsonSchemaTypeChecker" defined above so enumerations, frozen
# dicts, tuples, and sets type-check as their JSON counterparts.
Draft7Validator = validators.extend(
    validators.Draft7Validator,
    validators={
        'enum': enum_validator,
    },
    type_checker=JsonSchemaTypeChecker,
)
# A copy of the default draft 7 format checker, extended below with a custom
# "python-type" format. Deep-copied so the global checker is not modified.
draft7_format_checker = copy.deepcopy(jsonschema.draft7_format_checker)


@draft7_format_checker.checks('python-type')
def json_schema_is_python_type(instance: typing.Any) -> bool:
    # A valid "python-type" is either an actual Python type or a string
    # (assumed to describe a type).
    return is_type(instance) or isinstance(instance, str)
# We cannot use "Draft7Validator" as a type (MyPy complains), so we are using
# "validators.Draft7Validator", which has the same interface.
def load_schema_validators(schemas: typing.Dict, load_validators: typing.Sequence[str]) -> typing.List[validators.Draft7Validator]:
    """
    Build a JSON schema validator for every schema filename listed in
    ``load_validators``, resolving references only against the local
    ``schemas`` mapping (URI -> schema JSON).
    """

    not_found = object()

    schema_validators = []
    for schema_filename in load_validators:
        # Find the schema whose URI basename matches the requested filename.
        matches = (json_value for uri, json_value in schemas.items() if os.path.basename(uri) == schema_filename)
        schema_json = next(matches, not_found)
        if schema_json is not_found:
            raise exceptions.InvalidArgumentValueError("Cannot find schema '{schema_filename}'.".format(schema_filename=schema_filename))

        # We validate schemas using unmodified validator.
        validators.Draft7Validator.check_schema(schema_json)

        schema_validators.append(Draft7Validator(
            schema=schema_json,
            resolver=RefResolverNoRemote(schema_json['id'], schema_json, schemas),
            format_checker=draft7_format_checker,
        ))

    return schema_validators
def datetime_for_json(timestamp: datetime.datetime) -> str:
    """Format ``timestamp`` as an ISO 8601 UTC string with a ``Z`` suffix."""

    # Since Python 3.6 "astimezone" can be called on naive instances
    # that are presumed to represent system local time.
    utc_timestamp = timestamp.astimezone(datetime.timezone.utc)
    # Strip the timezone before formatting so "+00:00" is not appended, and
    # attach the equivalent "Z" designator manually instead.
    return utc_timestamp.replace(tzinfo=None).isoformat('T') + 'Z'
class JsonEncoder(json.JSONEncoder):
    """
    JSON encoder with extensions, among them the main ones are:

    * Frozen dict is encoded as a dict.
    * Python types are encoded into strings describing them.
    * Python enumerations are encoded into their string names.
    * Sets are encoded into lists.
    * Encodes ndarray and DataFrame as nested lists.
    * Encodes datetime into ISO format with UTC timezone.
    * Everything else which cannot be encoded is converted to a string.

    You probably want to use `to_json_structure` and not this class, because `to_json_structure`
    also encodes ``NaN``, ``Infinity``, and ``-Infinity`` as strings.

    It does not necessarily make a JSON which can then be parsed back to reconstruct the original value.
    """

    def default(self, o: typing.Any) -> typing.Any:
        # Importing here to prevent import cycle.
        from d3m.metadata import base

        if isinstance(o, numpy.matrix):
            # One cannot iterate over a matrix segment by segment. You always get back
            # a matrix (2D structure) and not an array of rows or columns. By converting
            # it to an array such iteration segment by segment works.
            o = numpy.array(o)

        # NOTE: the order of checks matters -- more specific types must be
        # handled before the generic "typing.Mapping"/"typing.Sequence" checks.
        if isinstance(o, frozendict.frozendict):
            return dict(o)
        if isinstance(o, frozendict.FrozenOrderedDict):
            return collections.OrderedDict(o)
        if is_type(o):
            return type_to_str(o)
        if isinstance(o, Enum):
            return o.name
        # Metadata sentinel values are encoded through their "repr".
        if o is base.ALL_ELEMENTS:
            return repr(o)
        if o is base.NO_VALUE:
            return repr(o)
        # For encoding numpy.int64, numpy.float64 already works.
        if isinstance(o, numpy.integer):
            return int(o)
        if isinstance(o, numpy.bool_):
            return bool(o)
        if isinstance(o, typing.Mapping):
            return collections.OrderedDict(o)
        if isinstance(o, typing.Set):
            # Sorted by string representation so the output is deterministic.
            return sorted(o, key=str)
        if isinstance(o, pandas.DataFrame):
            return list(o.itertuples(index=False, name=None))
        if isinstance(o, (typing.Sequence, numpy.ndarray)):
            return list(o)
        if isinstance(o, decimal.Decimal):
            return float(o)
        if isinstance(o, bytes):
            return base64.b64encode(o).decode('utf8')
        if isinstance(o, datetime.datetime):
            return datetime_for_json(o)

        try:
            return super().default(o)
        except TypeError:
            # Anything the base encoder rejects is converted to its string
            # representation instead of failing.
            return str(o)
def normalize_numbers(obj: typing.Dict) -> typing.Dict:
    """Round-trip ``obj`` through JSON, converting every integer into a float."""

    serialized = json.dumps(obj)
    return json.loads(serialized, parse_int=float)
# Maps JSON constant names to their Python string representations
# ("-inf", "inf", "nan"); used by "to_json_structure" when loading JSON
# to replace non-finite floats with strings.
json_constant_map = {
    '-Infinity': str(float('-Infinity')),
    'Infinity': str(float('Infinity')),
    'NaN': str(float('NaN')),
}
def to_json_structure(obj: typing.Any) -> typing.Any:
    """
    In addition to what `JsonEncoder` encodes, this function also encodes as strings
    float ``NaN``, ``Infinity``, and ``-Infinity``.

    It does not necessarily make a JSON structure which can then be parsed back to
    reconstruct the original value. For that use ``to_reversible_json_structure``.
    """

    # "allow_nan=False" is deliberately not used here: non-finite float values
    # are handled while loading instead, because "JsonEncoder.default" is never
    # invoked for float values so they cannot be handled there.
    # See: https://bugs.python.org/issue36841
    serialized = json.dumps(obj, cls=JsonEncoder)
    return json.loads(serialized, parse_constant=json_constant_map.__getitem__)
def _json_key(key: typing.Any) -> str:
    """Return ``key`` unchanged if it is a string, otherwise raise ``TypeError``."""

    if not isinstance(key, str):
        raise TypeError("Key must be a string, not '{key_type}'.".format(key_type=type(key)))
    return key
def to_reversible_json_structure(obj: typing.Any) -> typing.Any:
"""
Operation is not idempotent.
"""
if isinstance(obj, (str, bool, NONE_TYPE)):
return obj
obj_type = type(obj)
if _is_int(obj_type):
# To make sure it is Python int.
obj = int(obj)
return obj
elif _is_float(obj_type):
# To make sure it is Python float.
obj = float(obj)
if not numpy.isfinite(obj):
return {
'encoding': 'pickle',
'description': str(obj),
'value': base64.b64encode(pickle.dumps(obj)).decode('utf8'),
}
else:
return obj
elif isinstance(obj, typing.Mapping):
if 'encoding' in | |
<filename>apps/site/api/tests/record_tests.py<gh_stars>1-10
from django import test
from rest_framework.settings import api_settings
from localground.apps.site.api import views
from localground.apps.site import models
from localground.apps.site.api.tests.base_tests import ViewMixinAPI
import urllib
import json
from rest_framework import status
from django.contrib.gis.geos import GEOSGeometry
from localground.apps.site.tests import Client, ModelMixin
def get_metadata():
    """Expected serializer metadata for record endpoints, keyed by field name."""
    def spec(field_type, read_only=False, required=False):
        # One serializer field description in the shape the API reports.
        return {'read_only': read_only, 'required': required, 'type': field_type}

    return {
        'description': spec('string'),
        'tags': spec('field'),
        'url': spec('field', read_only=True),
        'overlay_type': spec('field', read_only=True),
        'geometry': spec('geojson'),
        'owner': spec('field', read_only=True),
        'project_id': spec('field', required=True),
        'id': spec('integer', read_only=True),
        'name': spec('string'),
        'extras': spec('json'),
        'dataset': spec('field', read_only=True),
        # 'media': spec('field', read_only=True),
        'attached_photos_videos': spec('field', read_only=True),
        'attached_audio': spec('field', read_only=True),
        'attached_map_images': spec('field', read_only=True),
        'field_1': spec('string'),
        'field_2': spec('integer'),
        'field_3': spec('datetime'),
        'field_4': spec('field'),
        'field_5': spec('float'),
        'field_6': spec('choice'),
        'field_7': spec('string'),
    }
class DataMixin(object):
    """Shared GeoJSON geometry and "extras" fixtures for record API tests."""

    # Valid GeoJSON geometries:
    Point = {
        "type": "Point",
        "coordinates": [12.492324113849, 41.890307434153]
    }
    LineString = {
        "type": "LineString",
        "coordinates": [[102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]]
    }
    Polygon = {
        "type": "Polygon",
        "coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                         [100.0, 1.0], [100.0, 0.0]]]
    }
    # Intentionally invalid geometries, used to exercise 400 responses:
    # "Polygon1" is not a valid GeoJSON geometry type.
    Crazy1 = {
        "type": "Polygon1",
        "coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                         [100.0, 1.0], [100.0, 0.0]]]
    }
    # First coordinate has four elements instead of two.
    Crazy2 = {
        "type": "Polygon",
        "coordinates": [[[100.0, 0.0, 6, 8], [101.0, 0.0], [101.0, 1.0],
                         [100.0, 1.0], [100.0, 0.0]]]
    }
    # Well-formed JSON payload for the "extras" field.
    ExtrasGood = '''{
        "source": "http://google.com",
        "video": "youtube.com",
        "order": 5
    }'''
    # Malformed JSON ("video" has no value), used to exercise 400 responses.
    ExtrasBad = '''{
        "source": "http://google.com",
        "video",
        "order": 5
    }'''
class APIRecordListTest(test.TestCase, ViewMixinAPI, DataMixin):
    """Tests for the record list endpoint (POST creation and GET listing)."""

    def setUp(self):
        ViewMixinAPI.setUp(self)
        self.view = views.RecordList.as_view()
        self.metadata = get_metadata()
        self.dataset = self.create_dataset_with_fields(num_fields=7)
        self.markerwattrs = self.create_record(
            self.user, self.project, dataset=self.dataset)
        self.urls = ['/api/0/datasets/%s/data/' % self.dataset.id]

    def tearDown(self):
        # Previously a stray "pass" sat above these statements, making the
        # cleanup look disabled even though it still ran; removed.
        # delete method also removes files from file system:
        models.Photo.objects.all().delete()
        models.Audio.objects.all().delete()

    def test_post_individual_attrs(self):
        """Each field type can be posted on its own and is echoed back."""
        for d in [
            {'field_1': 'field_1 text'},
            {'field_2': 77},
            {'field_3': "2012-09-04 06:00:00"},
            {'field_4': True},
            {'field_5': 43124.543252},
            {'field_6': 2},
            {'field_7': 'Independent'}
        ]:
            default_data = {
                'project_id': self.project.id
            }
            default_data.update(d)
            urls = self.urls
            for url in urls:
                url = url + '?project_id={0}'.format(self.project.id)
                response = self.client_user.post(
                    url,
                    data=urllib.urlencode(default_data),
                    HTTP_X_CSRFTOKEN=self.csrf_token,
                    content_type="application/x-www-form-urlencoded"
                )
                # Implicitly asserts a record was created (IndexError otherwise).
                new_marker = self.dataset.get_records().order_by('-id',)[0]
                # Python 2: dict.keys()/values() return indexable lists.
                self.assertEqual(
                    response.data[d.keys()[0]], d.values()[0]
                )
                self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_post_many_attributes(self):
        """All seven field types can be posted together and are echoed back."""
        hstore_data = {
            'field_1': 'field_1 text',
            'field_2': 77,
            'field_3': '2012-09-04 06:00:00',
            'field_4': True,
            'field_5': 43124.543252,
            'field_6': 2,
            'field_7': 'Independent'
        }
        dict_len = len(hstore_data)
        data = {
            'project_id': self.project.id
        }
        data.update(hstore_data)
        urls = self.urls
        for url in urls:
            url = url + '?project_id={0}'.format(self.project.id)
            response = self.client_user.post(
                url,
                data=urllib.urlencode(data),
                HTTP_X_CSRFTOKEN=self.csrf_token,
                content_type="application/x-www-form-urlencoded"
            )
            # Implicitly asserts a record was created (IndexError otherwise).
            new_marker = self.dataset.get_records().order_by('-id',)[0]
            # Previously also verified against the stored record directly:
            # for i in range(0, dict_len):
            #     self.assertEqual(
            #         new_marker.attributes[hstore_data.keys()[i]],
            #         hstore_data.values()[i])
            for i in range(0, dict_len):
                self.assertEqual(
                    response.data[hstore_data.keys()[i]],
                    hstore_data.values()[i]
                )
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_post_fails_w_invalid_attrs(self):
        """Type-mismatched field values are rejected with a 400."""
        for d in [
            {'field_2': 'some text'},
            {'field_3': '199012-31T243:59:60Z'},
            {'field_4': 'invalid text'},
            {'field_5': 'invalid text'},
            {'field_6': 'nothing'}
        ]:
            default_data = {
                'project_id': self.project.id
            }
            default_data.update(d)
            urls = self.urls
            for url in urls:
                url = url + '?project_id={0}'.format(self.project.id)
                response = self.client_user.post(
                    url,
                    data=urllib.urlencode(default_data),
                    HTTP_X_CSRFTOKEN=self.csrf_token,
                    content_type="application/x-www-form-urlencoded"
                )
                new_marker = self.dataset.get_records().order_by('-id',)[0]
                # Previously also verified against the stored record directly:
                # self.assertEqual(
                #     new_marker.attributes[d.keys()[0]], d.values()[0])
                self.assertEqual(
                    response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_allow_null_posts(self):
        """Sparse posts (zero or one populated field) are accepted."""
        # Test a range of sparse options where zero or one value is populated.
        # Then loop through each option and see if it works:
        hstores = [
            {},  # no values given (the rest are null)
            {'field_1': 'Hi!'},  # only field_1 has a value, the rest null
            {'field_2': 5},
            {'field_3': '2012-09-04 06:00:00'},
            {'field_4': False},
            {'field_5': 3.14159},
            {'field_6': 2},
            {'field_7': 'Independent'}
        ]
        for url in self.urls:
            for hstore_data in hstores:
                data = {
                    'project_id': self.project.id,
                    'name': 'test'
                }
                data.update(hstore_data)
                response = self.client_user.post(
                    url + '?project_id={0}'.format(self.project.id),
                    data=urllib.urlencode(data),
                    HTTP_X_CSRFTOKEN=self.csrf_token,
                    content_type="application/x-www-form-urlencoded"
                )
                d = response.data
                for key in hstore_data.keys():
                    self.assertEqual(d.get('project_id'), self.project.id)
                    self.assertEqual(d.get('name'), 'test')
                    self.assertEqual(d.get(key), hstore_data[key])
                self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_page_200_status_basic_user(self, urls=None, **kwargs):
        """Record list endpoints return 200 for a basic authenticated user."""
        # NOTE(review): the "urls" parameter is unused (self.urls is iterated);
        # kept for signature compatibility.
        for url in self.urls:
            response = self.client_user.get(url, {
                'project_id': self.project.id
            })
            self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_bad_json_creates_fails(self, **kwargs):
        """Invalid geometry or malformed "extras" JSON yields a 400."""
        # 1. define a series of bad JSON dictionaries
        for d in [
            {'geometry': self.Crazy1},
            {'geometry': self.Crazy2},
            {'extras': self.ExtrasBad}
        ]:
            params = {
                'name': '<NAME>',
                'caption': 'Test description',
                'geometry': self.Point,
                'project_id': self.project.id,
                'extras': self.ExtrasGood
            }
            # 2. update the params dictionary with the invalid dictionary entry
            params.update(d)
            for i, url in enumerate(self.urls):
                url = url + '?project_id={0}'.format(self.project.id)
                response = self.client_user.post(
                    url,
                    data=urllib.urlencode(params),
                    HTTP_X_CSRFTOKEN=self.csrf_token,
                    content_type="application/x-www-form-urlencoded")
                self.assertEqual(
                    response.status_code,
                    status.HTTP_400_BAD_REQUEST)

    # new_marker.name always returns 'None'
    def test_create_marker_point_line_poly_using_post(self, **kwargs):
        """Point, LineString, and Polygon records are created and stored."""
        for i, url in enumerate(self.urls):
            name = 'MWA'
            description = 'Test description1'
            for k in ['Point', 'LineString', 'Polygon']:
                geom = getattr(self, k)
                response = self.client_user.post(
                    url,
                    data=urllib.urlencode({
                        'geometry': geom,
                        'name': name,
                        'caption': description,
                        'project_id': self.project.id,
                        'extras': self.ExtrasGood
                    }),
                    HTTP_X_CSRFTOKEN=self.csrf_token,
                    content_type="application/x-www-form-urlencoded")
                self.assertEqual(response.status_code, status.HTTP_201_CREATED)
                new_marker = self.dataset.get_records().order_by('-id',)[0]
                self.assertEqual(
                    new_marker.geometry,
                    GEOSGeometry(
                        json.dumps(geom)))
                self.assertEqual(k, new_marker.geometry.geom_type)
                self.assertEqual(new_marker.project.id, self.project.id)
                self.assertEqual(
                    new_marker.extras, json.loads(self.ExtrasGood)
                )
class APIRecordInstanceTest(test.TestCase, ViewMixinAPI, DataMixin):
    def setUp(self):
        """Build the shared fixture: a 7-field dataset, one record, the
        instance-endpoint URL for that record, and per-field sample data.
        """
        ViewMixinAPI.setUp(self)
        self.view = views.RecordInstance.as_view()
        # Dataset with seven typed extra fields (field_1 .. field_7).
        self.dataset = self.create_dataset_with_fields(num_fields=7)
        self.metadata = get_metadata()
        # project_id is exposed by the serializer but not client-writable.
        self.metadata.update({
            'project_id':
            {'read_only': True, 'required': True, 'type': 'field'}
        })
        self.markerwattrs = self.create_record(
            self.user, self.project, dataset=self.dataset)
        # Instance endpoint for the record created above.
        self.urls = [
            '/api/0/datasets/%s/data/%s/' %
            (self.markerwattrs.dataset.id, self.markerwattrs.id)
        ]
        # List endpoint used by the POST helpers below.
        self.list_url = '/api/0/datasets/%s/data/' % self.dataset.id
        # One single-key dict per field, each with a type-valid sample value.
        self.hstore_data = [
            {'field_1': 'field_1 text'},
            {'field_2': 77},
            {'field_3': '2012-09-04 06:00:00'},
            {'field_4': True},
            {'field_5': 43124.543252},
            {'field_6': 2},
            {'field_7': 'Independent'}
        ]
    def tearDown(self):
        """Remove media objects created during the test run."""
        # delete method also removes files from file system:
        models.Photo.objects.all().delete()
        models.Audio.objects.all().delete()
def post_hstore_data_all(self, hstore_data):
default_data = {
'project_id': self.project.id,
'geometry': self.Point,
'caption': 'this is the caption text'
}
default_data.update(hstore_data)
url = self.list_url + '?project_id={0}'.format(self.project.id)
response = self.client_user.post(
url,
data=urllib.urlencode(default_data),
HTTP_X_CSRFTOKEN=self.csrf_token,
content_type="application/x-www-form-urlencoded"
)
new_marker = self.dataset.get_records().order_by('-id',)[0]
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
return new_marker
def post_hstore_data(self, hstore_data):
mwa_ids = []
posted_data = {}
for d in hstore_data:
default_data = {
'project_id': self.project.id,
'geometry': self.Point,
'caption': 'this is the caption text'
}
default_data.update(d)
url = self.list_url + '?project_id={0}'.format(self.project.id)
response = self.client_user.post(
url,
data=urllib.urlencode(default_data),
HTTP_X_CSRFTOKEN=self.csrf_token,
content_type="application/x-www-form-urlencoded"
)
new_marker = self.dataset.get_records().order_by('-id',)[0]
self.assertEqual(
response.data[d.keys()[0]], d.values()[0]
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# store values for upcoming test
mwa_ids.append(new_marker.id)
posted_data[new_marker.id] = [d.keys()[0], d.values()[0]]
# return some information about the newly created markers
return mwa_ids, posted_data
def test_get(self):
# run self.post_hstore_data() and get info
mwa_ids, posted_data = self.post_hstore_data(self.hstore_data)
# now, test GET for each new marker
for marker_id in mwa_ids:
response = self.client_user.get(self.list_url + '%s/' % marker_id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# test contains the key/attribute
self.assertTrue(posted_data[marker_id][0] in response.data)
# test key/attribute value is correct
# having to cast to string and make everything lowercase to
# get matches...seems like the wrong approach
self.assertEqual(
response.data[posted_data[marker_id][0]],
posted_data[marker_id][1])
def test_put(self):
# run self.post_hstore_data() and get info
mwa_ids, posted_data = self.post_hstore_data(self.hstore_data)
new_hstore_data_dict = {
'field_1': 'new field_1 text',
'field_2': 99,
'field_3': '2012-09-04 07:00:00',
'field_4': False,
'field_5': 7777.7777,
'field_6': 1,
'field_7': 'Democrat'
}
# now, test PUT for each new marker (replace)
for marker_id in mwa_ids:
# first just check for some pre-existing default data
marker = models.Record.objects.get(id=marker_id)
url = self.list_url + '%s/' % marker_id
key = posted_data[marker_id][0]
new_data_item = new_hstore_data_dict[key]
new_data = {
key: new_data_item
}
response = self.client_user.put(
url,
data=urllib.urlencode(new_data),
HTTP_X_CSRFTOKEN=self.csrf_token,
content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, status.HTTP_200_OK)
# test that contains the hstore key/attribute
self.assertTrue(posted_data[marker_id][0] in response.data)
# test hstore key/attribute value is correct
self.assertEqual(
response.data[posted_data[marker_id][0]],
new_data_item)
# finally, check that other fields are replaced (nulled)
marker = models.Record.objects.get(id=marker_id)
def test_patch(self):
marker = self.post_hstore_data_all({
'field_1': 'field_1 text',
'field_2': 77,
'field_3': '2012-09-04 06:00:00',
'field_4': True,
'field_5': 43124.543252,
'field_6': 2,
'field_7': 'Independent'
})
self.assertEqual(
json.loads(marker.geometry.geojson),
self.Point
)
url = self.list_url + '%s/' % marker.id
new_data = {
'field_1': 'new field_1 text',
'field_2': 99,
'field_3': '2012-09-04 07:00:00',
'field_4': False,
'field_5': 7777.7777,
'field_6': 1,
'field_7': 'Democrat'
}
for key in new_data:
response = self.client_user.patch(
url,
data=urllib.urlencode({
key: new_data[key]
}),
HTTP_X_CSRFTOKEN=self.csrf_token,
content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, status.HTTP_200_OK)
# test contains the hstore key/attribute
self.assertTrue(key in response.data)
# test hstore key/attribute value is correct
self.assertEqual(response.data[key], new_data[key])
# finally, check that other fields have not been replaced (nulled)
marker = models.Record.objects.get(id=marker.id)
self.assertEqual(json.loads(marker.geometry.geojson), self.Point)
self.assertEqual(response.data['geometry'], self.Point)
# Check that at the end of this process, all key-value
# pairs have persisted:
response = self.client_user.get(url)
for key | |
# -*- coding: utf-8 -*-
""" Playground documentation.
Module defining Playground Base Class
"""
import os
from abc import ABC
import yaml
import pymunk
from .utils import PositionAreaSampler
from .utils.definitions import SPACE_DAMPING, CollisionTypes, SceneElementTypes
# pylint: disable=unused-argument
# pylint: disable=line-too-long
class Playground(ABC):
""" Playground is a Base Class that manages the physical simulation.
Playground manages the interactions between Agents and Scene Elements.
Attributes:
size: size of the scene (width, length).
scene_elements: list of SceneElements present in the Playground.
fields: list of fields producing SceneElements in the Playground.
agents: list of Agents present in the Playground.
initial_agent_position: position or PositionAreaSampler,
Starting position of an agent (single agent).
done: bool, True if the playground reached termination.
"""
# pylint: disable=too-many-instance-attributes
scene_entities = []
    def __init__(self, size):
        """Create the playground and its physics space.

        Args:
            size: (width, length) of the scene.
        """
        # Generate Scene
        self.size = size
        self._width, self._length = self.size
        # Initialization of the pymunk space, modelling all the physics
        self.space = self._initialize_space()
        # Public attributes for entities in the playground
        self.scene_elements = []
        self.fields = []
        self.agents = []
        # Private attributes for managing interactions in playground
        self._disappeared_scene_elements = []
        self._grasped_scene_elements = {}
        self._teleported = []
        # Add entities declared in the scene (subclasses populate the
        # class-level ``scene_entities`` list).
        for scene_entity in self.scene_entities:
            self.add_scene_element(scene_entity)
        self.done = False
        self.initial_agent_position = None
        # Register pymunk collision handlers.
        # NOTE(review): _handle_interactions is defined elsewhere — confirm
        # it only registers handlers and has no ordering dependency on the
        # attributes set below.
        self._handle_interactions()
        self.time_limit = None
        self.time_limit_reached_reward = None
        self.time_test = 0
@staticmethod
def parse_configuration(key):
""" Private method that parses yaml configuration files.
Args:
key: (str) name of the playground configuration.
Returns:
Dictionary of attributes and default values.
"""
fname = 'utils/configs/playground.yml'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, fname), 'r') as yaml_file:
default_config = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return default_config[key]
@staticmethod
def _initialize_space():
""" Method to initialize Pymunk empty space for 2D physics.
Returns: Pymunk Space
"""
space = pymunk.Space()
space.gravity = pymunk.Vec2d(0., 0.)
space.damping = SPACE_DAMPING
return space
    def update(self, steps):
        """ Update the Playground

        Update all SceneElements, Fields, Timers and Grasps
        Runs the Physics engine for n steps.

        Args:
            steps: Number of steps

        """
        # Let each agent prepare before physics runs.
        for agent in self.agents:
            agent.pre_step()
        # Run the physics engine; each sub-step advances 1/steps of a
        # simulated second, so the total simulated time per call is 1s.
        for _ in range(steps):
            self.space.step(1. / steps)
        for elem in self.scene_elements:
            elem.pre_step()
            # Elements moved along waypoints bypass the physics engine, so
            # their shapes must be re-indexed for collision detection.
            if elem.follows_waypoints:
                self.space.reindex_shapes_for_body(elem.pm_body)
        # Post-physics bookkeeping, in dependency order.
        self._fields_produce()
        self._check_timers()
        self._release_grasps()
        self._check_teleports()
    def reset(self):
        """ Reset the Playground to its initial state.
        """
        # remove entities and filter out entities which are temporary
        # (remove_scene_element parks non-temporary ones in
        # _disappeared_scene_elements so they can be restored below)
        for entity in self.scene_elements.copy():
            self.remove_scene_element(entity)
        # reset and replace entities that are not temporary
        for entity in self._disappeared_scene_elements.copy():
            entity.reset()
            self.add_scene_element(entity)
        # reset fields
        for entity in self.fields:
            entity.reset()
        # reset agents: remove/re-add so they are re-placed from their
        # initial position
        for agent in self.agents.copy():
            agent.reset()
            self.remove_agent(agent)
            self.add_agent(agent)
        self.done = False
def add_agent(self, new_agent, tries=100):
""" Method to add an Agent to the Playground.
If the Agent has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Args:
new_agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the agent
"""
# If already there
if new_agent in self.scene_elements:
raise ValueError('Agent already in Playground')
# Inform agent of the playground size
new_agent.size_playground = self.size
if new_agent.allow_overlapping:
self._add_agent(new_agent)
else:
success = self._add_agent_without_ovelapping(new_agent, tries = tries)
if not success:
raise ValueError("Agent couldn't be placed without overlapping")
def _add_agent(self, agent):
""" Add an agent to the playground.
Args:
agent: Agent.
"""
self.agents.append(agent)
if agent.initial_position is not None:
pass
elif self.initial_agent_position is not None:
agent.initial_position = self.initial_agent_position
else:
raise ValueError("""Agent initial position should be defined in the playground or passed as an argument)
to the class agent""")
agent.position = agent.initial_position
for body_part in agent.parts:
self.space.add(*body_part.pm_elements)
def _add_agent_without_ovelapping(self, agent, tries=100):
""" Method to add am Agent to the Playground without overlapping.
Useful when an Agent has a random initial position, to avoid overlapping.
Args:
agent: Agent to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
trial = 0
visible_collide_parts = True
interactive_collide_parts = True
all_shapes = self.space.shapes.copy()
while (interactive_collide_parts or visible_collide_parts) and trial < tries:
self._add_agent(agent)
visible_collide_parts = False
interactive_collide_parts = False
for part in agent.parts:
visible_collide = False
interactive_collide = False
if part.pm_visible_shape is not None:
collisions = [part.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if part.pm_interaction_shape is not None:
collisions = [part.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
visible_collide_parts = visible_collide or visible_collide_parts
interactive_collide_parts = interactive_collide or interactive_collide_parts
if visible_collide_parts or interactive_collide_parts:
self.remove_agent(agent)
trial += 1
if interactive_collide_parts or visible_collide_parts:
return False
return True
def _add_scene_element(self, new_scene_element, new_position):
""" Method to add a SceneElement to the Playground.
"""
if new_scene_element in self.scene_elements:
raise ValueError('Scene element already in Playground')
new_scene_element.size_playground = self.size
if new_position:
new_scene_element.position = new_scene_element.initial_position
self.space.add(*new_scene_element.pm_elements)
self.scene_elements.append(new_scene_element)
if new_scene_element in self._disappeared_scene_elements:
self._disappeared_scene_elements.remove(new_scene_element)
def _add_scene_element_without_ovelapping(self, scene_element, tries, new_position):
trial = 0
visible_collide = True
interactive_collide = True
all_shapes = self.space.shapes.copy()
while (visible_collide or interactive_collide) and trial < tries:
self._add_scene_element(scene_element, new_position)
visible_collide = False
interactive_collide = False
if scene_element.pm_visible_shape is not None:
collisions = [scene_element.pm_visible_shape.shapes_collide(shape) for shape in all_shapes]
visible_collide = any([len(collision.points) != 0 for collision in collisions])
if scene_element.pm_interaction_shape is not None:
collisions = [scene_element.pm_interaction_shape.shapes_collide(shape) for shape in all_shapes]
interactive_collide = any([len(collision.points) != 0 for collision in collisions])
if visible_collide or interactive_collide:
self.remove_scene_element(scene_element)
trial += 1
if visible_collide or interactive_collide:
return False
return True
def add_scene_element(self, scene_element, tries=100, new_position=True):
""" Method to add a SceneElement to the Playground.
If the Element has its attribute allow_overlapping set to False,
the playground will try to add it multiple times.
Useful when a SceneElement has a random initial position, to avoid overlapping.
Args:
scene_element: Scene Element to add to the Playground
tries: Number of times the Playground will try to place the new_entity
"""
if scene_element.entity_type is SceneElementTypes.FIELD:
# If already there
if scene_element in self.fields:
raise ValueError('Field already in Playground')
self.fields.append(scene_element)
else:
if scene_element in self.scene_elements:
raise ValueError('Field already in Playground')
# Else
scene_element.size_playground = self.size
if scene_element.allow_overlapping:
self._add_scene_element(scene_element, new_position)
else:
success = self._add_scene_element_without_ovelapping(scene_element, tries = tries, new_position=new_position)
if not success:
raise ValueError('Entity could not be placed without overlapping')
def _remove_agents(self):
for agent in self.agents:
self.remove_agent(agent)
def remove_agent(self, agent):
if agent not in self.agents:
return False
for part in agent.parts:
self.space.remove(*part.pm_elements)
part.velocity = [0, 0, 0]
part.grasped = []
agent.initial_position = None
self.agents.remove(agent)
return True
    def remove_scene_element(self, scene_element):
        """ Remove a SceneElement from the physics space and clean up every
        reference other entities hold to it.

        Returns:
            bool: False if the element was not present, True otherwise.
        """
        if scene_element not in self.scene_elements:
            return False
        # Detach from the physics simulation and the bookkeeping list.
        self.space.remove(*scene_element.pm_elements)
        self.scene_elements.remove(scene_element)
        # Non-temporary elements are parked so reset() can restore them.
        if not scene_element.is_temporary_entity:
            self._disappeared_scene_elements.append(scene_element)
        # Dispensers and fields track what they produced; forget this
        # element so their production counts stay accurate.
        for elem in self.scene_elements:
            if elem.entity_type == 'dispenser' and scene_element in elem.produced_entities:
                elem.produced_entities.remove(scene_element)
        for field in self.fields:
            if scene_element in field.produced_entities:
                field.produced_entities.remove(scene_element)
        # If a body part is currently grasping this element, break the
        # grasp joints.  NOTE(review): the map entry itself is NOT popped
        # here (pop is commented out below); it is later dropped by
        # _release_grasps() — confirm that is intentional.
        if scene_element in self._grasped_scene_elements.keys():
            body_part = self._grasped_scene_elements[scene_element]
            self.space.remove(*body_part.grasped)
            body_part.grasped = []
        # self._grasped_scene_elements.pop(scene_element)
        return True
def _fields_produce(self):
for field in self.fields:
if field.can_produce():
new_entity = field.produce()
self.add_scene_element(new_entity)
def _check_timers(self):
for entity in self.scene_elements:
if entity.timed and entity.timer == 0:
list_remove, list_add = entity.activate(self)
for entity_removed in list_remove:
self.remove_scene_element(entity_removed)
for entity_added in list_add:
self.add_scene_element(entity_added)
def _release_grasps(self):
for agent in self.agents:
for part in agent.parts:
if not part.is_holding and part.can_grasp:
for joint in part.grasped:
self.space.remove(joint)
part.grasped = []
for element_grasped, part in self._grasped_scene_elements.copy().items():
if not part.grasped:
self._grasped_scene_elements.pop(element_grasped)
def _check_teleports(self):
for agent, teleport in self._teleported:
overlaps = self.agent_overlaps_with_element(agent, teleport)
if not overlaps:
self._teleported.remove((agent, teleport))
    def agent_overlaps_with_element(self, agent, element):
        """ Return True if any body part of ``agent`` is in contact with
        ``element`` (its visible or interaction shape).
        """
        overlaps = False
        for part in agent.parts:
            # NOTE(review): both branches collide against
            # part.pm_visible_shape; the second probably ought to use
            # part.pm_interaction_shape — confirm.  Also raises
            # AttributeError if part.pm_visible_shape is None while the
            # element has shapes.
            if element.pm_visible_shape is not None:
                overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_visible_shape).points != []
            if element.pm_interaction_shape is not None:
                overlaps = overlaps or part.pm_visible_shape.shapes_collide(element.pm_interaction_shape).points != []
        return overlaps
def get_scene_element_from_shape(self, pm_shape):
"""
Returns: Returns the Scene Element associated with the pymunk shape.
"""
entity = next(iter([e for e in self.scene_elements if pm_shape in e.pm_elements]), None)
return entity
def get_agent_from_shape(self, pm_shape):
"""
Returns: Returns the Agent associated with the pymunk shape.
"""
for agent in self.agents:
if agent.owns_shape(pm_shape):
return agent
return None
def get_entity_from_shape(self, pm_shape):
"""
Returns the element associated with the pymunk shape
Args:
pm_shape: Pymunk shaape
Returns:
Single entitiy or None
"""
scene_element = self.get_scene_element_from_shape(pm_shape)
if scene_element is not None: return scene_element
for agent in self.agents:
part = agent.get_bodypart_from_shape(pm_shape)
if part is not None: return part
return None
def _get_closest_agent(self, ent):
dist_list = [(a.position[0] - ent.position[0])**2 + (a.position[1] - ent.position[1])**2 for a in self.agents]
index_min_dist = dist_list.index(min(dist_list))
closest_agent = self.agents[index_min_dist]
| |
# smarts/core/smarts.py
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import importlib.resources as pkg_resources
import logging
import os
import warnings
from collections import defaultdict
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple
import numpy as np
from scipy.spatial.distance import cdist
from envision import types as envision_types
from envision.client import Client as EnvisionClient
from smarts import VERSION
from smarts.core.chassis import BoxChassis
from smarts.core.plan import Plan
from . import models
from .agent_interface import AgentInterface
from .agent_manager import AgentManager
from .bubble_manager import BubbleManager
from .colors import SceneColors
from .controllers import ActionSpaceType, Controllers
from .coordinates import BoundingBox, Point
from .external_provider import ExternalProvider
from .motion_planner_provider import MotionPlannerProvider
from .provider import Provider, ProviderRecoveryFlags, ProviderState
from .road_map import RoadMap
from .scenario import Mission, Scenario
from .sensors import Collision, Observation
from .sumo_traffic_simulation import SumoTrafficSimulation
from .traffic_history_provider import TrafficHistoryProvider
from .trajectory_interpolation_provider import TrajectoryInterpolationProvider
from .trap_manager import TrapManager
from .utils import pybullet
from .utils.id import Id
from .utils.math import rounder_for_dt
from .utils.pybullet import bullet_client as bc
from .utils.visdom_client import VisdomClient
from .vehicle import Vehicle, VehicleState
from .vehicle_index import VehicleIndex
# Configure root logging once at import time; modules inherit this format.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)s: {%(module)s} %(message)s",
    datefmt="%Y-%m-%d,%H:%M:%S",
    level=logging.ERROR,
)
# Upper bound on the pybullet physics stepping frequency (Hz).
MAX_PYBULLET_FREQ = 240
class SMARTSNotSetupError(Exception):
    """Represents a case where SMARTS cannot operate because it is not set up yet."""
    # (redundant `pass` removed: the docstring already forms the class body)
class SMARTSDestroyedError(Exception):
    """Represents a case where SMARTS cannot operate because it is destroyed."""
    # (redundant `pass` removed: the docstring already forms the class body)
class SMARTS:
"""The core SMARTS simulator. This is the direct interface to all parts of the simulation.
Args:
agent_interfaces: The interfaces providing SMARTS with the understanding of what features each agent needs.
traffic_sim: The traffic simulator for providing non-agent traffic.
envision: An envision client for connecting to an envision visualization server.
visdom: A visdom client for connecting to a visdom visualization server.
fixed_timestep_sec: The fixed timestep that will be default if time is not otherwise specified at step.
reset_agents_only: When specified the simulation will continue use of the current scenario.
zoo_addrs: The (ip:port) values of remote agent workers for externally hosted agents.
external_provider: Creates a special provider `SMARTS.external_provider` that allows for inserting state.
config: The simulation configuration file for unexposed configuration.
"""
    def __init__(
        self,
        agent_interfaces: Dict[str, AgentInterface],
        traffic_sim,  # SumoTrafficSimulation
        envision: Optional[EnvisionClient] = None,
        visdom: Optional[VisdomClient] = None,
        fixed_timestep_sec: Optional[float] = 0.1,
        reset_agents_only: bool = False,
        zoo_addrs: Optional[Tuple[str, int]] = None,
        external_provider: bool = False,
    ):
        self._log = logging.getLogger(self.__class__.__name__)
        self._sim_id = Id.new("smarts")
        self._is_setup = False
        self._is_destroyed = False
        self._scenario: Optional[Scenario] = None
        self._renderer = None
        self._envision: Optional[EnvisionClient] = envision
        self._visdom: Optional[VisdomClient] = visdom
        self._traffic_sim = traffic_sim
        self._external_provider: ExternalProvider = None
        self._resetting = False
        self._reset_required = False
        # A fixed timestep of None selects variable-time stepping.
        assert fixed_timestep_sec is None or fixed_timestep_sec > 0
        self.fixed_timestep_sec: Optional[float] = fixed_timestep_sec
        self._last_dt = fixed_timestep_sec
        self._elapsed_sim_time = 0
        self._total_sim_time = 0
        self._step_count = 0
        # Core providers, registered unconditionally.
        self._motion_planner_provider = MotionPlannerProvider()
        self._traffic_history_provider = TrafficHistoryProvider()
        self._trajectory_interpolation_provider = TrajectoryInterpolationProvider()
        self._provider_recovery_flags: Dict[Provider, ProviderRecoveryFlags] = {}
        self._providers: List[Provider] = []
        self.add_provider(
            self._motion_planner_provider,
        )
        self.add_provider(
            self._traffic_history_provider,
        )
        self.add_provider(
            self._trajectory_interpolation_provider,
        )
        # Traffic sim goes first (index 0) so its state is harmonized
        # before the other providers'.
        if self._traffic_sim:
            self._insert_provider(
                0,
                self._traffic_sim,
                recovery_flags=ProviderRecoveryFlags.EPISODE_REQUIRED
                | ProviderRecoveryFlags.ATTEMPT_RECOVERY,
            )
        if external_provider:
            self._external_provider = ExternalProvider(self)
            self._insert_provider(0, self._external_provider)
        # We buffer provider state between steps to compensate for TRACI's timestep delay
        self._last_provider_state = None
        self._reset_agents_only = reset_agents_only  # a.k.a "teleportation"
        self._imitation_learning_mode = False
        # For macOS GUI. See our `BulletClient` docstring for details.
        # from .utils.bullet import BulletClient
        # self._bullet_client = BulletClient(pybullet.GUI)
        self._bullet_client = bc.BulletClient(pybullet.DIRECT)
        # Action spaces whose vehicles are controlled dynamically (rather
        # than kinematically repositioned).
        self._dynamic_action_spaces = {
            ActionSpaceType.Continuous,
            ActionSpaceType.Lane,
            ActionSpaceType.ActuatorDynamic,
            ActionSpaceType.LaneWithContinuousSpeed,
            ActionSpaceType.Trajectory,
            ActionSpaceType.MPC,
            # ActionSpaceType.Imitation,
        }
        # Set up indices
        self._agent_manager = AgentManager(agent_interfaces, zoo_addrs)
        self._vehicle_index = VehicleIndex()
        # TODO: Should not be stored in SMARTS
        self._vehicle_collisions = defaultdict(list)  # list of `Collision` instances
        self._vehicle_states = []
        self._bubble_manager = None
        self._trap_manager: Optional[TrapManager] = None
        self._ground_bullet_id = None
        self._map_bb = None
    def step(
        self,
        agent_actions: Dict[str, Any],
        time_delta_since_last_step: Optional[float] = None,
    ) -> Tuple[
        Dict[str, Observation],
        Dict[str, float],
        Dict[str, bool],
        Dict[str, Dict[str, float]],
    ]:
        """Progress the simulation by a fixed or specified time.

        Args:
            agent_actions:
                Actions that the agents want to perform on their actors.
            time_delta_since_last_step:
                Overrides the simulation step length. Progress simulation time by the given amount.
                Note the time_delta_since_last_step param is in (nominal) seconds.

        Returns:
            observations, rewards, dones, infos

        Raises:
            SMARTSNotSetupError: if called before reset()/setup().
        """
        if not self._is_setup:
            raise SMARTSNotSetupError("Must call reset() or setup() before stepping.")
        self._check_valid()
        # Fixed- and variable-time stepping are mutually exclusive.
        assert not (
            self._fixed_timestep_sec and time_delta_since_last_step
        ), "cannot switch from fixed- to variable-time steps mid-simulation"
        try:
            return self._step(agent_actions, time_delta_since_last_step)
        except (KeyboardInterrupt, SystemExit):
            # ensure we clean-up if the user exits the simulation
            self._log.info("Simulation was interrupted by the user.")
            self.destroy()
            raise  # re-raise the KeyboardInterrupt
        except Exception as e:
            self._log.error(
                "Simulation crashed with exception. Attempting to cleanly shutdown."
            )
            self._log.exception(e)
            self.destroy()
            raise  # re-raise
def _check_if_acting_on_active_agents(self, agent_actions):
for agent_id in agent_actions.keys():
if agent_id not in self._agent_manager.ego_agent_ids:
self._log.warning(
f"Attempted to perform actions on non-existing agent, {agent_id} "
)
    def _step(self, agent_actions, time_delta_since_last_step: Optional[float] = None):
        """Steps through the simulation while applying the given agent actions.

        Returns the observations, rewards, done, and infos signals.
        """
        # Due to a limitation of our traffic simulator(SUMO) interface(TRACI), we can
        # only observe traffic state of the previous simulation step.
        #
        # To compensate for this, we:
        #
        # 0. Advance the simulation clock
        # 1. Fetch social agent actions
        # 2. Step all providers and harmonize state
        # 3. Step bubble manager
        # 4. Calculate observation and reward
        # 5. Send observations to social agents
        # 6. Clear done agents
        # 7. Perform visualization
        #
        # In this way, observations and reward are computed with data that is
        # consistently with one step of latency and the agent will observe consistent
        # data.
        # 0. Advance the simulation clock.
        # It's been this long since our last step.
        # NOTE(review): `or` treats an explicit 0 the same as None here;
        # step() asserts against mixing modes, but a passed 0 would
        # silently fall back to the default — confirm acceptable.
        self._last_dt = time_delta_since_last_step or self._fixed_timestep_sec or 0.1
        self._elapsed_sim_time = self._rounder(self._elapsed_sim_time + self._last_dt)
        # 1. Fetch agent actions
        self._log.info("Fetching agent actions")
        all_agent_actions = self._agent_manager.fetch_agent_actions(self, agent_actions)
        # 2. Step all providers and harmonize state
        self._log.info("Stepping all providers and harmonizing state")
        provider_state = self._step_providers(all_agent_actions)
        self._log.info("Checking if all agents are active")
        self._check_if_acting_on_active_agents(agent_actions)
        # 3. Step bubble manager and trap manager
        self._log.info("Syncing vehicle index")
        self._vehicle_index.sync()
        self._log.info("Stepping through bubble manager")
        self._bubble_manager.step(self)
        self._log.info("Stepping through trap manager")
        self._trap_manager.step(self)
        # 4. Calculate observation and reward
        # We pre-compute vehicle_states here because we *think* the users will
        # want these during their observation/reward computations.
        # This is a hack to give us some short term perf wins. Longer term we
        # need to expose better support for batched computations
        self._vehicle_states = [v.state for v in self._vehicle_index.vehicles]
        # Agents
        self._log.info("Stepping through sensors")
        self._agent_manager.step_sensors(self)
        if self._renderer:
            # runs through the render pipeline (for camera-based sensors)
            # MUST perform this after step_sensors() above, and before observe() below,
            # so that all updates are ready before rendering happens per
            self._log.info("Running through the render pipeline")
            self._renderer.render()
        self._log.info("Calculating observations and rewards")
        observations, rewards, scores, dones = self._agent_manager.observe(self)
        self._log.info("Filtering response for ego")
        response_for_ego = self._agent_manager.filter_response_for_ego(
            (observations, rewards, scores, dones)
        )
        # 5. Send observations to social agents
        self._log.info("Sending observations to social agents")
        self._agent_manager.send_observations_to_social_agents(observations)
        # 6. Clear done agents
        self._log.info("Clearing done agents")
        self._teardown_done_agents_and_vehicles(dones)
        # 7. Perform visualization
        self._log.info("Trying to emit the envision state")
        self._try_emit_envision_state(provider_state, observations, scores)
        self._log.info("Trying to emit the visdom observations")
        self._try_emit_visdom_obs(observations)
        # Only ego responses are returned to the caller.
        observations, rewards, scores, dones = response_for_ego
        extras = dict(scores=scores)
        self._step_count += 1
        return observations, rewards, dones, extras
def _teardown_done_agents_and_vehicles(self, dones):
def done_vehicle_ids(dones):
vehicle_ids = set()
for agent_id, done in | |
# Base endpoint of the Google Custom Search JSON API (query string is appended).
RESURL = 'https://www.googleapis.com/customsearch/v1?'
# Default Programmable Search Engine ID (the `cx` query parameter).
DEFAULT_CX = 'a07e95fc47a466f98'
# Upper bound for the `start` pagination parameter.
# NOTE(review): presumably because the API serves at most ~100 results
# (10 per page) — confirm against the Custom Search API docs.
START_PAGE_CAP = 91
# Maps country display names to values accepted by the Custom Search API's
# `cr` (country restrict) parameter: "country" + ISO 3166-1 alpha-2 code.
# Fix: the original only had a misspelled key for Macedonia
# ("Yugosalv"), so lookups by the correctly spelled country name failed.
# The corrected key is added and the old one kept for backward compatibility;
# a plain 'Croatia' alias is also added to match the key used in GL below.
CR = {
    'Afghanistan': 'countryAF',
    'Albania': 'countryAL',
    'Algeria': 'countryDZ',
    'American Samoa': 'countryAS',
    'Andorra': 'countryAD',
    'Angola': 'countryAO',
    'Anguilla': 'countryAI',
    'Antarctica': 'countryAQ',
    'Antigua and Barbuda': 'countryAG',
    'Argentina': 'countryAR',
    'Armenia': 'countryAM',
    'Aruba': 'countryAW',
    'Australia': 'countryAU',
    'Austria': 'countryAT',
    'Azerbaijan': 'countryAZ',
    'Bahamas': 'countryBS',
    'Bahrain': 'countryBH',
    'Bangladesh': 'countryBD',
    'Barbados': 'countryBB',
    'Belarus': 'countryBY',
    'Belgium': 'countryBE',
    'Belize': 'countryBZ',
    'Benin': 'countryBJ',
    'Bermuda': 'countryBM',
    'Bhutan': 'countryBT',
    'Bolivia': 'countryBO',
    'Bosnia and Herzegovina': 'countryBA',
    'Botswana': 'countryBW',
    'Bouvet Island': 'countryBV',
    'Brazil': 'countryBR',
    'British Indian Ocean Territory': 'countryIO',
    'Brunei Darussalam': 'countryBN',
    'Bulgaria': 'countryBG',
    'Burkina Faso': 'countryBF',
    'Burundi': 'countryBI',
    'Cambodia': 'countryKH',
    'Cameroon': 'countryCM',
    'Canada': 'countryCA',
    'Cape Verde': 'countryCV',
    'Cayman Islands': 'countryKY',
    'Central African Republic': 'countryCF',
    'Chad': 'countryTD',
    'Chile': 'countryCL',
    'China': 'countryCN',
    'Christmas Island': 'countryCX',
    'Cocos (Keeling) Islands': 'countryCC',
    'Colombia': 'countryCO',
    'Comoros': 'countryKM',
    'Congo': 'countryCG',
    'Congo, the Democratic Republic of the': 'countryCD',
    'Cook Islands': 'countryCK',
    'Costa Rica': 'countryCR',
    'Cote D\'ivoire': 'countryCI',
    'Croatia (Hrvatska)': 'countryHR',
    'Croatia': 'countryHR',  # alias: matches the key spelling used in GL
    'Cuba': 'countryCU',
    'Cyprus': 'countryCY',
    'Czech Republic': 'countryCZ',
    'Denmark': 'countryDK',
    'Djibouti': 'countryDJ',
    'Dominica': 'countryDM',
    'Dominican Republic': 'countryDO',
    'East Timor': 'countryTP',
    'Ecuador': 'countryEC',
    'Egypt': 'countryEG',
    'El Salvador': 'countrySV',
    'Equatorial Guinea': 'countryGQ',
    'Eritrea': 'countryER',
    'Estonia': 'countryEE',
    'Ethiopia': 'countryET',
    'European Union': 'countryEU',
    'Falkland Islands (Malvinas)': 'countryFK',
    'Faroe Islands': 'countryFO',
    'Fiji': 'countryFJ',
    'Finland': 'countryFI',
    'France': 'countryFR',
    'France, Metropolitan': 'countryFX',
    'French Guiana': 'countryGF',
    'French Polynesia': 'countryPF',
    'French Southern Territories': 'countryTF',
    'Gabon': 'countryGA',
    'Gambia': 'countryGM',
    'Georgia': 'countryGE',
    'Germany': 'countryDE',
    'Ghana': 'countryGH',
    'Gibraltar': 'countryGI',
    'Greece': 'countryGR',
    'Greenland': 'countryGL',
    'Grenada': 'countryGD',
    'Guadeloupe': 'countryGP',
    'Guam': 'countryGU',
    'Guatemala': 'countryGT',
    'Guinea': 'countryGN',
    'Guinea-Bissau': 'countryGW',
    'Guyana': 'countryGY',
    'Haiti': 'countryHT',
    'Heard Island and Mcdonald Islands': 'countryHM',
    'Holy See (Vatican City State)': 'countryVA',
    'Honduras': 'countryHN',
    'Hong Kong': 'countryHK',
    'Hungary': 'countryHU',
    'Iceland': 'countryIS',
    'India': 'countryIN',
    'Indonesia': 'countryID',
    'Iran, Islamic Republic of': 'countryIR',
    'Iraq': 'countryIQ',
    'Ireland': 'countryIE',
    'Israel': 'countryIL',
    'Italy': 'countryIT',
    'Jamaica': 'countryJM',
    'Japan': 'countryJP',
    'Jordan': 'countryJO',
    'Kazakhstan': 'countryKZ',
    'Kenya': 'countryKE',
    'Kiribati': 'countryKI',
    'Korea, Democratic People\'s Republic of': 'countryKP',
    'Korea, Republic of': 'countryKR',
    'Kuwait': 'countryKW',
    'Kyrgyzstan': 'countryKG',
    'Lao People\'s Democratic Republic': 'countryLA',
    'Latvia': 'countryLV',
    'Lebanon': 'countryLB',
    'Lesotho': 'countryLS',
    'Liberia': 'countryLR',
    'Libyan Arab Jamahiriya': 'countryLY',
    'Liechtenstein': 'countryLI',
    'Lithuania': 'countryLT',
    'Luxembourg': 'countryLU',
    'Macao': 'countryMO',
    'Macedonia, the Former Yugosalv Republic of': 'countryMK',  # kept: original misspelling, for backward compatibility
    'Macedonia, the Former Yugoslav Republic of': 'countryMK',  # corrected spelling
    'Madagascar': 'countryMG',
    'Malawi': 'countryMW',
    'Malaysia': 'countryMY',
    'Maldives': 'countryMV',
    'Mali': 'countryML',
    'Malta': 'countryMT',
    'Marshall Islands': 'countryMH',
    'Martinique': 'countryMQ',
    'Mauritania': 'countryMR',
    'Mauritius': 'countryMU',
    'Mayotte': 'countryYT',
    'Mexico': 'countryMX',
    'Micronesia, Federated States of': 'countryFM',
    'Moldova, Republic of': 'countryMD',
    'Monaco': 'countryMC',
    'Mongolia': 'countryMN',
    'Montserrat': 'countryMS',
    'Morocco': 'countryMA',
    'Mozambique': 'countryMZ',
    'Myanmar': 'countryMM',
    'Namibia': 'countryNA',
    'Nauru': 'countryNR',
    'Nepal': 'countryNP',
    'Netherlands': 'countryNL',
    'Netherlands Antilles': 'countryAN',
    'New Caledonia': 'countryNC',
    'New Zealand': 'countryNZ',
    'Nicaragua': 'countryNI',
    'Niger': 'countryNE',
    'Nigeria': 'countryNG',
    'Niue': 'countryNU',
    'Norfolk Island': 'countryNF',
    'Northern Mariana Islands': 'countryMP',
    'Norway': 'countryNO',
    'Oman': 'countryOM',
    'Pakistan': 'countryPK',
    'Palau': 'countryPW',
    'Palestinian Territory': 'countryPS',
    'Panama': 'countryPA',
    'Papua New Guinea': 'countryPG',
    'Paraguay': 'countryPY',
    'Peru': 'countryPE',
    'Philippines': 'countryPH',
    'Pitcairn': 'countryPN',
    'Poland': 'countryPL',
    'Portugal': 'countryPT',
    'Puerto Rico': 'countryPR',
    'Qatar': 'countryQA',
    'Reunion': 'countryRE',
    'Romania': 'countryRO',
    'Russian Federation': 'countryRU',
    'Rwanda': 'countryRW',
    'Saint Helena': 'countrySH',
    'Saint Kitts and Nevis': 'countryKN',
    'Saint Lucia': 'countryLC',
    'Saint Pierre and Miquelon': 'countryPM',
    'Saint Vincent and the Grenadines': 'countryVC',
    'Samoa': 'countryWS',
    'San Marino': 'countrySM',
    'Sao Tome and Principe': 'countryST',
    'Saudi Arabia': 'countrySA',
    'Senegal': 'countrySN',
    'Serbia and Montenegro': 'countryCS',
    'Seychelles': 'countrySC',
    'Sierra Leone': 'countrySL',
    'Singapore': 'countrySG',
    'Slovakia': 'countrySK',
    'Slovenia': 'countrySI',
    'Solomon Islands': 'countrySB',
    'Somalia': 'countrySO',
    'South Africa': 'countryZA',
    'South Georgia and the South Sandwich Islands': 'countryGS',
    'Spain': 'countryES',
    'Sri Lanka': 'countryLK',
    'Sudan': 'countrySD',
    'Suriname': 'countrySR',
    'Svalbard and Jan Mayen': 'countrySJ',
    'Swaziland': 'countrySZ',
    'Sweden': 'countrySE',
    'Switzerland': 'countryCH',
    'Syrian Arab Republic': 'countrySY',
    'Taiwan, Province of China': 'countryTW',
    'Tajikistan': 'countryTJ',
    'Tanzania, United Republic of': 'countryTZ',
    'Thailand': 'countryTH',
    'Togo': 'countryTG',
    'Tokelau': 'countryTK',
    'Tonga': 'countryTO',
    'Trinidad and Tobago': 'countryTT',
    'Tunisia': 'countryTN',
    'Turkey': 'countryTR',
    'Turkmenistan': 'countryTM',
    'Turks and Caicos Islands': 'countryTC',
    'Tuvalu': 'countryTV',
    'Uganda': 'countryUG',
    'Ukraine': 'countryUA',
    'United Arab Emirates': 'countryAE',
    'United Kingdom': 'countryUK',
    'United States': 'countryUS',
    'United States Minor Outlying Islands': 'countryUM',
    'Uruguay': 'countryUY',
    'Uzbekistan': 'countryUZ',
    'Vanuatu': 'countryVU',
    'Venezuela': 'countryVE',
    'Vietnam': 'countryVN',
    'Virgin Islands, British': 'countryVG',
    'Virgin Islands, U.S.': 'countryVI',
    'Wallis and Futuna': 'countryWF',
    'Western Sahara': 'countryEH',
    'Yemen': 'countryYE',
    'Yugoslavia': 'countryYU',
    'Zambia': 'countryZM',
    'Zimbabwe': 'countryZW'
}
# Maps country display names to values for the Custom Search API's `gl`
# (geolocation) parameter: bare lowercase ISO 3166-1 alpha-2 codes.
# Fix: same "Yugosalv" misspelling as in CR — the corrected key is added
# and the old one kept for backward compatibility; a 'Vietnam' alias is
# added so the key spelling used in CR also resolves here.
GL = {
    'Afghanistan': 'af',
    'Albania': 'al',
    'Algeria': 'dz',
    'American Samoa': 'as',
    'Andorra': 'ad',
    'Angola': 'ao',
    'Anguilla': 'ai',
    'Antarctica': 'aq',
    'Antigua and Barbuda': 'ag',
    'Argentina': 'ar',
    'Armenia': 'am',
    'Aruba': 'aw',
    'Australia': 'au',
    'Austria': 'at',
    'Azerbaijan': 'az',
    'Bahamas': 'bs',
    'Bahrain': 'bh',
    'Bangladesh': 'bd',
    'Barbados': 'bb',
    'Belarus': 'by',
    'Belgium': 'be',
    'Belize': 'bz',
    'Benin': 'bj',
    'Bermuda': 'bm',
    'Bhutan': 'bt',
    'Bolivia': 'bo',
    'Bosnia and Herzegovina': 'ba',
    'Botswana': 'bw',
    'Bouvet Island': 'bv',
    'Brazil': 'br',
    'British Indian Ocean Territory': 'io',
    'Brunei Darussalam': 'bn',
    'Bulgaria': 'bg',
    'Burkina Faso': 'bf',
    'Burundi': 'bi',
    'Cambodia': 'kh',
    'Cameroon': 'cm',
    'Canada': 'ca',
    'Cape Verde': 'cv',
    'Cayman Islands': 'ky',
    'Central African Republic': 'cf',
    'Chad': 'td',
    'Chile': 'cl',
    'China': 'cn',
    'Christmas Island': 'cx',
    'Cocos (Keeling) Islands': 'cc',
    'Colombia': 'co',
    'Comoros': 'km',
    'Congo': 'cg',
    'Congo, the Democratic Republic of the': 'cd',
    'Cook Islands': 'ck',
    'Costa Rica': 'cr',
    'Cote D\'ivoire': 'ci',
    'Croatia': 'hr',
    'Cuba': 'cu',
    'Cyprus': 'cy',
    'Czech Republic': 'cz',
    'Denmark': 'dk',
    'Djibouti': 'dj',
    'Dominica': 'dm',
    'Dominican Republic': 'do',
    'Ecuador': 'ec',
    'Egypt': 'eg',
    'El Salvador': 'sv',
    'Equatorial Guinea': 'gq',
    'Eritrea': 'er',
    'Estonia': 'ee',
    'Ethiopia': 'et',
    'Falkland Islands (Malvinas)': 'fk',
    'Faroe Islands': 'fo',
    'Fiji': 'fj',
    'Finland': 'fi',
    'France': 'fr',
    'French Guiana': 'gf',
    'French Polynesia': 'pf',
    'French Southern Territories': 'tf',
    'Gabon': 'ga',
    'Gambia': 'gm',
    'Georgia': 'ge',
    'Germany': 'de',
    'Ghana': 'gh',
    'Gibraltar': 'gi',
    'Greece': 'gr',
    'Greenland': 'gl',
    'Grenada': 'gd',
    'Guadeloupe': 'gp',
    'Guam': 'gu',
    'Guatemala': 'gt',
    'Guinea': 'gn',
    'Guinea-Bissau': 'gw',
    'Guyana': 'gy',
    'Haiti': 'ht',
    'Heard Island and Mcdonald Islands': 'hm',
    'Holy See (Vatican City State)': 'va',
    'Honduras': 'hn',
    'Hong Kong': 'hk',
    'Hungary': 'hu',
    'Iceland': 'is',
    'India': 'in',
    'Indonesia': 'id',
    'Iran, Islamic Republic of': 'ir',
    'Iraq': 'iq',
    'Ireland': 'ie',
    'Israel': 'il',
    'Italy': 'it',
    'Jamaica': 'jm',
    'Japan': 'jp',
    'Jordan': 'jo',
    'Kazakhstan': 'kz',
    'Kenya': 'ke',
    'Kiribati': 'ki',
    'Korea, Democratic People\'s Republic of': 'kp',
    'Korea, Republic of': 'kr',
    'Kuwait': 'kw',
    'Kyrgyzstan': 'kg',
    'Lao People\'s Democratic Republic': 'la',
    'Latvia': 'lv',
    'Lebanon': 'lb',
    'Lesotho': 'ls',
    'Liberia': 'lr',
    'Libyan Arab Jamahiriya': 'ly',
    'Liechtenstein': 'li',
    'Lithuania': 'lt',
    'Luxembourg': 'lu',
    'Macao': 'mo',
    'Macedonia, the Former Yugosalv Republic of': 'mk',  # kept: original misspelling, for backward compatibility
    'Macedonia, the Former Yugoslav Republic of': 'mk',  # corrected spelling
    'Madagascar': 'mg',
    'Malawi': 'mw',
    'Malaysia': 'my',
    'Maldives': 'mv',
    'Mali': 'ml',
    'Malta': 'mt',
    'Marshall Islands': 'mh',
    'Martinique': 'mq',
    'Mauritania': 'mr',
    'Mauritius': 'mu',
    'Mayotte': 'yt',
    'Mexico': 'mx',
    'Micronesia, Federated States of': 'fm',
    'Moldova, Republic of': 'md',
    'Monaco': 'mc',
    'Mongolia': 'mn',
    'Montserrat': 'ms',
    'Morocco': 'ma',
    'Mozambique': 'mz',
    'Myanmar': 'mm',
    'Namibia': 'na',
    'Nauru': 'nr',
    'Nepal': 'np',
    'Netherlands': 'nl',
    'Netherlands Antilles': 'an',
    'New Caledonia': 'nc',
    'New Zealand': 'nz',
    'Nicaragua': 'ni',
    'Niger': 'ne',
    'Nigeria': 'ng',
    'Niue': 'nu',
    'Norfolk Island': 'nf',
    'Northern Mariana Islands': 'mp',
    'Norway': 'no',
    'Oman': 'om',
    'Pakistan': 'pk',
    'Palau': 'pw',
    'Palestinian Territory, Occupied': 'ps',
    'Panama': 'pa',
    'Papua New Guinea': 'pg',
    'Paraguay': 'py',
    'Peru': 'pe',
    'Philippines': 'ph',
    'Pitcairn': 'pn',
    'Poland': 'pl',
    'Portugal': 'pt',
    'Puerto Rico': 'pr',
    'Qatar': 'qa',
    'Reunion': 're',
    'Romania': 'ro',
    'Russian Federation': 'ru',
    'Rwanda': 'rw',
    'Saint Helena': 'sh',
    'Saint Kitts and Nevis': 'kn',
    'Saint Lucia': 'lc',
    'Saint Pierre and Miquelon': 'pm',
    'Saint Vincent and the Grenadines': 'vc',
    'Samoa': 'ws',
    'San Marino': 'sm',
    'Sao Tome and Principe': 'st',
    'Saudi Arabia': 'sa',
    'Senegal': 'sn',
    'Serbia and Montenegro': 'cs',
    'Seychelles': 'sc',
    'Sierra Leone': 'sl',
    'Singapore': 'sg',
    'Slovakia': 'sk',
    'Slovenia': 'si',
    'Solomon Islands': 'sb',
    'Somalia': 'so',
    'South Africa': 'za',
    'South Georgia and the South Sandwich Islands': 'gs',
    'Spain': 'es',
    'Sri Lanka': 'lk',
    'Sudan': 'sd',
    'Suriname': 'sr',
    'Svalbard and Jan Mayen': 'sj',
    'Swaziland': 'sz',
    'Sweden': 'se',
    'Switzerland': 'ch',
    'Syrian Arab Republic': 'sy',
    'Taiwan, Province of China': 'tw',
    'Tajikistan': 'tj',
    'Tanzania, United Republic of': 'tz',
    'Thailand': 'th',
    'Timor-Leste': 'tl',
    'Togo': 'tg',
    'Tokelau': 'tk',
    'Tonga': 'to',
    'Trinidad and Tobago': 'tt',
    'Tunisia': 'tn',
    'Turkey': 'tr',
    'Turkmenistan': 'tm',
    'Turks and Caicos Islands': 'tc',
    'Tuvalu': 'tv',
    'Uganda': 'ug',
    'Ukraine': 'ua',
    'United Arab Emirates': 'ae',
    'United Kingdom': 'uk',
    'United States': 'us',
    'United States Minor Outlying Islands': 'um',
    'Uruguay': 'uy',
    'Uzbekistan': 'uz',
    'Vanuatu': 'vu',
    'Venezuela': 've',
    'Viet Nam': 'vn',
    'Vietnam': 'vn',  # alias: matches the key spelling used in CR
    'Virgin Islands, British': 'vg',
    'Virgin Islands, U.S.': 'vi',
    'Wallis and Futuna': 'wf',
    'Western Sahara': 'eh',
    'Yemen': 'ye',
    'Zambia': 'zm',
    'Zimbabwe': 'zw'
}
LR = {
'lang_ar': 'Arabic',
'lang_bg': 'Bulgarian',
'lang_ca': 'Catalan',
'lang_cs': 'Czech',
'lang_da': 'Danish',
'lang_de': 'German',
'lang_el': 'Greek',
'lang_en': | |
# Source repository: SwapneelM/projectaeora
from collections import defaultdict
import html2text
from textblob import TextBlob
import requests
import bs4
import os
from datetime import datetime, timedelta
def get_stopwords():
    """
    Load the English stop-word list shipped alongside this module.

    :return: A list of stop words, one per line of ``stopwords.txt``,
             each stripped of surrounding whitespace
    """
    # source=http://xpo6.com/list-of-english-stop-words/
    filepath = os.path.join(os.path.dirname(__file__), 'stopwords.txt')
    # Iterate the file object directly instead of a manual readline() loop;
    # the context manager guarantees the handle is closed.
    with open(filepath) as fp:
        return [line.strip() for line in fp]
def get_keywords(article):
    """
    :param article: A TextBlob of text to find the keywords from
    :return: A list of up to five title-cased keywords — the most frequent
             non-stop-words in the text (order of the returned list is
             unspecified, as before)
    """
    # set() gives O(1) membership tests; the original list scan plus the
    # list.count() sort key made this quadratic in the number of words.
    stopwords = set(get_stopwords())
    # One pass to count occurrences of each non-stop word (lower-cased).
    frequencies = {}
    for word in article.words:
        lowered = word.lower()
        if lowered not in stopwords:
            frequencies[lowered] = frequencies.get(lowered, 0) + 1
    # Distinct words by descending frequency; the stable sort keeps
    # first-seen order among ties, matching the original behaviour.
    ranked = sorted(frequencies, key=frequencies.get, reverse=True)
    keywords = set()
    for word in ranked:
        if len(keywords) >= 5:
            break
        keywords.add(word.title())
    return list(keywords)
def get_analysis(content):
    """
    Run a sentiment and keyword analysis over a piece of text.

    :param content: A string with the text content to analyse
    :return: A string specifying the sentiment of the text
    :return: A list of the keywords in the text
    """
    blob = TextBlob(content)
    keywords = get_keywords(blob)
    polarity = blob.sentiment.polarity
    # Map polarity sign onto the three sentiment labels.
    if polarity == 0:
        sentiment = 'neutral'
    else:
        sentiment = 'positive' if polarity > 0 else 'negative'
    return sentiment, keywords
# to_english determines the english word that will be substituted for the attribute name
# (internal attribute identifier -> human-readable phrase used in reply text).
to_english = {"bid": "bid", "offer": "offer", "sector": "sector", "sub_sector": "sub-sector",
              "high": "high", "low": "low", "diff": "change", "per_diff": "percentage change",
              "last_close_value": "last close", "last_close_date": "last close date", "revenue": "revenue",
              "market_cap": "market cap", "volume": "volume", "price": "price"}
def big_movers_card(top5, risers=True):
    """
    Build the "big movers" card payload.

    :param top5: A list of tuples containing data about the top 5 companies
                 (name, price, percentage change, ...)
    :param risers: Specify whether the list contains the risers (True) or fallers (False)
    :return: A dictionary containing the layout of the big movers card tables.
    """
    category = 'risers' if risers else 'fallers'
    # Table rows for the card, plus the plain names for the spoken phrase.
    companies = []
    names = []
    for company_row in top5:
        entry = defaultdict()
        entry['name'] = company_row[0]
        entry['price'] = company_row[1]
        entry['percentage_change'] = company_row[2]
        companies.append(entry)
        names.append(entry['name'])
    # Spoken phrase: "A.", "A and B.", "A, B and C." — same punctuation as
    # the original index-based loop produced.
    speech = 'The top 5 ' + category + ' are '
    if names:
        if len(names) == 1:
            speech += names[0] + '.'
        else:
            speech += ', '.join(names[:-1]) + ' and ' + names[-1] + '.'
    # Card visualisation elements.
    card = defaultdict()
    card['title'] = 'Top ' + category.title()
    card['companies'] = companies
    big_movers = defaultdict()
    big_movers['speech'] = speech
    big_movers['text'] = card
    big_movers['type'] = 'top'
    return big_movers
def news_reply(financial_news, days, positive_negative):
    """
    :param financial_news: A dictionary containing lists of LSE news and YAHOO news
        (keys 'LSE' and 'YAHOO'); each item exposes date/headline/url/source/
        impact/company attributes, and YAHOO items additionally a description
    :param days: An integer specifying the number of days of news the response should contain
    :param positive_negative: When truthy, append a sentence summarising the
        positive/neutral/negative article counts to the spoken reply
    :return: A dictionary containing the layout of the news cards
    """
    reply = defaultdict()
    # filter out LSE news that is too old
    lse_news = []
    for el in financial_news['LSE']:
        date = datetime.strptime(el.date, '%H:%M %d-%b-%Y')
        if date.date() >= datetime.now().date() - timedelta(days):
            row = {}
            row["date"] = el.date
            row["headline"] = el.headline
            row["url"] = el.url
            row["source"] = el.source
            row["impact"] = el.impact
            # LSE items carry no description, so no summary/sentiment analysis.
            row["summary"] = "No summary is available."
            row["sentiment"] = "none"
            row["keywords"] = list()
            row["company"] = el.company
            lse_news.append(row)
    # filter out YAHOO news that is too old
    yahoo_news = []
    # Sentiment tallies cover only the YAHOO articles that pass the date filter.
    number_positive = number_neutral = number_negative = 0
    for i in financial_news['YAHOO']:
        date = datetime.strptime(i.date, '%H:%M %d-%b-%Y')
        if date.date() >= datetime.now().date() - timedelta(days):
            row = {}
            row["date"] = i.date
            row["headline"] = i.headline
            row["url"] = i.url
            row["source"] = i.source
            row["impact"] = i.impact
            # Truncate the summary to 256 chars and ensure it ends in "...".
            row["summary"] = i.description[:256]
            if row["summary"][-3:] != "...":
                row["summary"] += "..."
            # Sentiment/keywords are computed on the full (untruncated) text.
            row["sentiment"], row["keywords"] = get_analysis(i.description)
            row["company"] = i.company
            yahoo_news.append(row)
            if row["sentiment"] == "positive":
                number_positive += 1
            elif row["sentiment"] == "neutral":
                number_neutral += 1
            else:
                number_negative += 1
    # Merge both feeds, newest first.
    news = lse_news + yahoo_news
    news.sort(key=lambda x: datetime.strptime(x["date"], '%H:%M %d-%b-%Y'), reverse=True)
    if news:
        reply['speech'] = "Here are some news articles that I've found!"
        reply['type'] = 'news'
        reply['text'] = news
        if positive_negative:
            # Zero counts read as "no" in the spoken summary.
            number_positive = 'no' if number_positive == 0 else number_positive
            number_neutral = 'no' if number_neutral == 0 else number_neutral
            number_negative = 'no' if number_negative == 0 else number_negative
            reply["positive_negative"] = "There are {} positive, {} neutral and {} negative articles.".format(
                number_positive, number_neutral, number_negative)
            reply['speech'] += " "
            reply['speech'] += reply['positive_negative']
    else:
        # Nothing recent enough: a "no-news" reply with a plain-text body.
        message = "I'm sorry, I couldn't find any recent articles. Try increasing the date period from the " \
                  "settings page or asking for an older date."
        reply['speech'] = message
        reply['type'] = "no-news"
        reply['text'] = message
    return reply
def get_company_reply(company, attribute):
    """
    :param company: A Company whose data was requested
    :param attribute: The attribute that the response should display the value of
    :return: A dictionary containing the layout of the company card
    """
    reply = defaultdict()
    # Stock-level attributes live on company.stock; anything else
    # (e.g. sector) lives on the company object itself (EAFP fallback).
    try:
        value = getattr(company.stock, attribute)
    except AttributeError:
        value = getattr(company, attribute)
    # related_attribute determines what related data will appear to complement the requested data
    related_attribute = {"bid": "offer", "offer": "bid", "sector": "sub_sector",
                         "sub_sector": "sector", "high": "low", "low": "high", "diff": "per_diff",
                         "per_diff": "diff", "last_close_value": "last_close_date",
                         "last_close_date": "last_close_value", "revenue": "market_cap",
                         "market_cap": "volume", "volume": "price", "price": "per_diff"}
    secondary_attribute = related_attribute[attribute]
    try:
        secondary_value = getattr(company.stock, secondary_attribute)
    except AttributeError:
        secondary_value = getattr(company, secondary_attribute)
    card = {'name': company.name, 'code': company.code, 'date': company.date, 'primary': value,
            'secondary': secondary_value, 'primary_type': attribute, 'secondary_type': secondary_attribute}
    reply['text'] = card
    reply['type'] = 'company'
    # Bug fix: str.format() instead of "+" concatenation, so a numeric
    # attribute value no longer raises TypeError when building the speech.
    reply['speech'] = "The {} of {} is {}".format(to_english[attribute], company.name, value)
    return reply
def comparison_reply(company_data):
    """
    Build a side-by-side price comparison reply for one or more companies.

    :param company_data: A list of Company objects to compare
    :return: A dictionary with 'speech', 'text' and 'type' keys
    """
    # One standard company card (price view) per requested company.
    companies = [get_company_reply(company, 'price') for company in company_data]
    reply = defaultdict()
    reply['text'] = companies
    reply['type'] = 'comparison'
    if len(companies) == 1:
        only = companies[0]['text']
        reply['speech'] = "The price of {} is {}.".format(only['name'], only['primary'])
    else:
        speech = 'Here is the side by side comparison of ' + company_data[0].name
        for extra in company_data[1:]:
            speech += ' and {}'.format(extra.name)
        reply['speech'] = speech
    return reply
def sector_reply(sector, sector_attribute):
    """
    :param sector: A Sector whose data was requested
    :param sector_attribute: The attribute that the response should show the value of
        ("highest_price", "lowest_price", "rising", "falling" or "performing")
    :return: A dictionary containing the layout of either a 'company card'/'big movers card'/'news card'.
        NOTE(review): any other attribute value falls through and returns None —
        confirm callers never pass one.
    """
    data = getattr(sector, sector_attribute)
    if sector_attribute == "highest_price" or sector_attribute == "lowest_price":
        # Fix: removed a duplicated getattr() that re-fetched `data`, and
        # split the attribute once instead of three times.
        # e.g. "highest_price" -> superlative "highest", metric "price"
        superlative, metric = sector_attribute.split('_', 1)
        speech = "{} has the {} {} in {}: {}".format(data.name, superlative, metric,
                                                     sector.name, getattr(data.stock, metric))
        # Reuse the standard company card, but with a sector-specific speech line.
        response = get_company_reply(data, "price")
        response['speech'] = speech
        return response
    elif sector_attribute == "rising" or sector_attribute == "falling":
        number_of_companies_in_sector = len(sector.companies)
        number_of_companies_moving_in_requested_direction = len(data)
        if number_of_companies_moving_in_requested_direction == 0:
            speech = "No " + sector.name + " are " + sector_attribute + ". "
        else:
            speech = "The following " + sector.name + " companies are " + sector_attribute + ". "
        companies = []
        # Build table of data, appending each name to the spoken phrase:
        # ", " between early names, " and " before the last, "." after it.
        for i in range(len(data)):
            row = defaultdict()
            row['name'] = data[i].name
            row['price'] = data[i].stock.price
            row['percentage_change'] = data[i].stock.per_diff
            speech += row['name']
            if i < len(data) - 2:
                speech += ', '
            elif i == len(data) - 1:
                speech += '.'
            else:
                speech += ' and '
            companies.append(row)
        movers = defaultdict()
        movers['speech'] = speech
        # Build elements for the card visualisation
        card = defaultdict()
        card['title'] = str(len(data)) + '/' + str(number_of_companies_in_sector) + ' ' + sector.name + ' are ' + sector_attribute
        card['companies'] = companies
        if number_of_companies_moving_in_requested_direction == 0:
            # No movers: plain-text reply instead of a table card.
            movers['text'] = speech
            movers['type'] = 'no-data'
        else:
            movers['text'] = card
            movers['type'] = 'top'
        return movers
    elif sector_attribute == "performing":
        # Performance overview: one table row per company in the list.
        companies = []
        for i in range(len(data)):
            row = defaultdict()
            row['name'] = data[i].name
            row['price'] = data[i].stock.price
            row['percentage_change'] = data[i].stock.per_diff
            companies.append(row)
        movers = defaultdict()
        movers['speech'] = "Here is some data about how " + sector.name + " are performing"
        card = defaultdict()
        card['title'] = "Performance of " + sector.name
        card['companies'] = companies
        movers['text'] = card
        movers['type'] = 'top'
        return movers
def members_reply(sector):
    """
    Build a card listing every company that belongs to a sector.

    :param sector: A Sector whose member companies should be listed
    :return: A dictionary with 'speech', 'text' and 'type' keys
    """
    # One name-only row per member company.
    companies = [defaultdict(None, name=member.name) for member in sector.companies]
    card = defaultdict()
    card['title'] = "Members of " + sector.name
    card['companies'] = companies
    movers = defaultdict()
    movers['speech'] = "Here are the members of " + sector.name
    movers['text'] = card
    movers['type'] = 'members'
    return movers
def revenue_reply(company, date_period):
"""
:param company: A Company whose revenue was requested
:param date_period: A string containing the date for which revenue data was requested for
:return: A dictionary containing the layout of the revenue card
"""
response = {}
card = {}
card['title'] = company.name
card['revenue_data'] = list()
response['speech'] = "Here is the revenue data for " + company.name
response['type'] = "revenue"
response['text'] = card
valid_date = False
|
# (non-code residue from an accidental paste, preserved as comments)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.