input
stringlengths
2.65k
237k
output
stringclasses
1 value
<gh_stars>0 # Copyright 2019 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. # This file is automatically generated by mkgrokdump and should not # be modified manually. # List of known V8 instance types. INSTANCE_TYPES = { 0: "INTERNALIZED_STRING_TYPE", 2: "EXTERNAL_INTERNALIZED_STRING_TYPE", 8: "ONE_BYTE_INTERNALIZED_STRING_TYPE", 10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE", 18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE", 26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE", 32: "STRING_TYPE", 33: "CONS_STRING_TYPE", 34: "EXTERNAL_STRING_TYPE", 35: "SLICED_STRING_TYPE", 37: "THIN_STRING_TYPE", 40: "ONE_BYTE_STRING_TYPE", 41: "CONS_ONE_BYTE_STRING_TYPE", 42: "EXTERNAL_ONE_BYTE_STRING_TYPE", 43: "SLICED_ONE_BYTE_STRING_TYPE", 45: "THIN_ONE_BYTE_STRING_TYPE", 50: "UNCACHED_EXTERNAL_STRING_TYPE", 58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE", 64: "SYMBOL_TYPE", 65: "BIG_INT_BASE_TYPE", 66: "HEAP_NUMBER_TYPE", 67: "ODDBALL_TYPE", 68: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE", 69: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE", 70: "FOREIGN_TYPE", 71: "WASM_TYPE_INFO_TYPE", 72: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE", 73: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE", 74: "CALLABLE_TASK_TYPE", 75: "CALLBACK_TASK_TYPE", 76: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE", 77: "LOAD_HANDLER_TYPE", 78: "STORE_HANDLER_TYPE", 79: "FUNCTION_TEMPLATE_INFO_TYPE", 80: "OBJECT_TEMPLATE_INFO_TYPE", 81: "ACCESS_CHECK_INFO_TYPE", 82: "ACCESSOR_INFO_TYPE", 83: "ACCESSOR_PAIR_TYPE", 84: "ALIASED_ARGUMENTS_ENTRY_TYPE", 85: "ALLOCATION_MEMENTO_TYPE", 86: "ALLOCATION_SITE_TYPE", 87: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE", 88: "ASM_WASM_DATA_TYPE", 89: "ASYNC_GENERATOR_REQUEST_TYPE", 90: "BREAK_POINT_TYPE", 91: "BREAK_POINT_INFO_TYPE", 92: "CACHED_TEMPLATE_OBJECT_TYPE", 93: "CALL_HANDLER_INFO_TYPE", 94: "CLASS_POSITIONS_TYPE", 95: "DEBUG_INFO_TYPE", 96: "ENUM_CACHE_TYPE", 97: "FEEDBACK_CELL_TYPE", 98: 
"FUNCTION_TEMPLATE_RARE_DATA_TYPE", 99: "INTERCEPTOR_INFO_TYPE", 100: "INTERPRETER_DATA_TYPE", 101: "MODULE_REQUEST_TYPE", 102: "PROMISE_CAPABILITY_TYPE", 103: "PROMISE_REACTION_TYPE", 104: "PROPERTY_DESCRIPTOR_OBJECT_TYPE", 105: "PROTOTYPE_INFO_TYPE", 106: "SCRIPT_TYPE", 107: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE", 108: "STACK_FRAME_INFO_TYPE", 109: "STACK_TRACE_FRAME_TYPE", 110: "TEMPLATE_OBJECT_DESCRIPTION_TYPE", 111: "TUPLE2_TYPE", 112: "WASM_EXCEPTION_TAG_TYPE", 113: "WASM_EXPORTED_FUNCTION_DATA_TYPE", 114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE", 115: "WASM_JS_FUNCTION_DATA_TYPE", 116: "FIXED_ARRAY_TYPE", 117: "HASH_TABLE_TYPE", 118: "EPHEMERON_HASH_TABLE_TYPE", 119: "GLOBAL_DICTIONARY_TYPE", 120: "NAME_DICTIONARY_TYPE", 121: "NUMBER_DICTIONARY_TYPE", 122: "ORDERED_HASH_MAP_TYPE", 123: "ORDERED_HASH_SET_TYPE", 124: "ORDERED_NAME_DICTIONARY_TYPE", 125: "SIMPLE_NUMBER_DICTIONARY_TYPE", 126: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE", 127: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE", 128: "SCRIPT_CONTEXT_TABLE_TYPE", 129: "BYTE_ARRAY_TYPE", 130: "BYTECODE_ARRAY_TYPE", 131: "FIXED_DOUBLE_ARRAY_TYPE", 132: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE", 133: "SCOPE_INFO_TYPE", 134: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE", 135: "AWAIT_CONTEXT_TYPE", 136: "BLOCK_CONTEXT_TYPE", 137: "CATCH_CONTEXT_TYPE", 138: "DEBUG_EVALUATE_CONTEXT_TYPE", 139: "EVAL_CONTEXT_TYPE", 140: "FUNCTION_CONTEXT_TYPE", 141: "MODULE_CONTEXT_TYPE", 142: "NATIVE_CONTEXT_TYPE", 143: "SCRIPT_CONTEXT_TYPE", 144: "WITH_CONTEXT_TYPE", 145: "EXPORTED_SUB_CLASS_BASE_TYPE", 146: "EXPORTED_SUB_CLASS_TYPE", 147: "EXPORTED_SUB_CLASS2_TYPE", 148: "SMALL_ORDERED_HASH_MAP_TYPE", 149: "SMALL_ORDERED_HASH_SET_TYPE", 150: "SMALL_ORDERED_NAME_DICTIONARY_TYPE", 151: "DESCRIPTOR_ARRAY_TYPE", 152: "STRONG_DESCRIPTOR_ARRAY_TYPE", 153: "SOURCE_TEXT_MODULE_TYPE", 154: "SYNTHETIC_MODULE_TYPE", 155: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE", 156: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE", 157: "WEAK_FIXED_ARRAY_TYPE", 158: "TRANSITION_ARRAY_TYPE", 
159: "CELL_TYPE", 160: "CODE_TYPE", 161: "CODE_DATA_CONTAINER_TYPE", 162: "COVERAGE_INFO_TYPE", 163: "EMBEDDER_DATA_ARRAY_TYPE", 164: "FEEDBACK_METADATA_TYPE", 165: "FEEDBACK_VECTOR_TYPE", 166: "FILLER_TYPE", 167: "FREE_SPACE_TYPE", 168: "INTERNAL_CLASS_TYPE", 169: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE", 170: "MAP_TYPE", 171: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE", 172: "PREPARSE_DATA_TYPE", 173: "PROPERTY_ARRAY_TYPE", 174: "PROPERTY_CELL_TYPE", 175: "SHARED_FUNCTION_INFO_TYPE", 176: "SMI_BOX_TYPE", 177: "SMI_PAIR_TYPE", 178: "SORT_STATE_TYPE", 179: "WASM_ARRAY_TYPE", 180: "WASM_CAPI_FUNCTION_DATA_TYPE", 181: "WASM_STRUCT_TYPE", 182: "WEAK_ARRAY_LIST_TYPE", 183: "WEAK_CELL_TYPE", 184: "JS_PROXY_TYPE", 1057: "JS_OBJECT_TYPE", 185: "JS_GLOBAL_OBJECT_TYPE", 186: "JS_GLOBAL_PROXY_TYPE", 187: "JS_MODULE_NAMESPACE_TYPE", 1040: "JS_SPECIAL_API_OBJECT_TYPE", 1041: "JS_PRIMITIVE_WRAPPER_TYPE", 1042: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE", 1043: "JS_ITERATOR_PROTOTYPE_TYPE", 1044: "JS_MAP_ITERATOR_PROTOTYPE_TYPE", 1045: "JS_OBJECT_PROTOTYPE_TYPE", 1046: "JS_PROMISE_PROTOTYPE_TYPE", 1047: "JS_REG_EXP_PROTOTYPE_TYPE", 1048: "JS_SET_ITERATOR_PROTOTYPE_TYPE", 1049: "JS_SET_PROTOTYPE_TYPE", 1050: "JS_STRING_ITERATOR_PROTOTYPE_TYPE", 1051: "JS_TYPED_ARRAY_PROTOTYPE_TYPE", 1052: "JS_GENERATOR_OBJECT_TYPE", 1053: "JS_ASYNC_FUNCTION_OBJECT_TYPE", 1054: "JS_ASYNC_GENERATOR_OBJECT_TYPE", 1055: "JS_ARGUMENTS_OBJECT_TYPE", 1056: "JS_API_OBJECT_TYPE", 1058: "JS_BOUND_FUNCTION_TYPE", 1059: "JS_FUNCTION_TYPE", 1060: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1061: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1062: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1063: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1064: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1065: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1066: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1067: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1068: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1069: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1070: 
"UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE", 1071: "JS_ARRAY_CONSTRUCTOR_TYPE", 1072: "JS_PROMISE_CONSTRUCTOR_TYPE", 1073: "JS_REG_EXP_CONSTRUCTOR_TYPE", 1074: "JS_MAP_KEY_ITERATOR_TYPE", 1075: "JS_MAP_KEY_VALUE_ITERATOR_TYPE", 1076: "JS_MAP_VALUE_ITERATOR_TYPE", 1077: "JS_SET_KEY_VALUE_ITERATOR_TYPE", 1078: "JS_SET_VALUE_ITERATOR_TYPE", 1079: "JS_DATA_VIEW_TYPE", 1080: "JS_TYPED_ARRAY_TYPE", 1081: "JS_MAP_TYPE", 1082: "JS_SET_TYPE", 1083: "JS_WEAK_MAP_TYPE", 1084: "JS_WEAK_SET_TYPE", 1085: "JS_ARRAY_TYPE", 1086: "JS_ARRAY_BUFFER_TYPE", 1087: "JS_ARRAY_ITERATOR_TYPE", 1088: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", 1089: "JS_COLLATOR_TYPE", 1090: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", 1091: "JS_DATE_TYPE", 1092: "JS_DATE_TIME_FORMAT_TYPE", 1093: "JS_DISPLAY_NAMES_TYPE", 1094: "JS_ERROR_TYPE", 1095: "JS_FINALIZATION_REGISTRY_TYPE", 1096: "JS_LIST_FORMAT_TYPE", 1097: "JS_LOCALE_TYPE", 1098: "JS_MESSAGE_OBJECT_TYPE", 1099: "JS_NUMBER_FORMAT_TYPE", 1100: "JS_PLURAL_RULES_TYPE", 1101: "JS_PROMISE_TYPE", 1102: "JS_REG_EXP_TYPE", 1103: "JS_REG_EXP_STRING_ITERATOR_TYPE", 1104: "JS_RELATIVE_TIME_FORMAT_TYPE", 1105: "JS_SEGMENT_ITERATOR_TYPE", 1106: "JS_SEGMENTER_TYPE", 1107: "JS_SEGMENTS_TYPE", 1108: "JS_STRING_ITERATOR_TYPE", 1109: "JS_V8_BREAK_ITERATOR_TYPE", 1110: "JS_WEAK_REF_TYPE", 1111: "WASM_EXCEPTION_OBJECT_TYPE", 1112: "WASM_GLOBAL_OBJECT_TYPE", 1113: "WASM_INSTANCE_OBJECT_TYPE", 1114: "WASM_MEMORY_OBJECT_TYPE", 1115: "WASM_MODULE_OBJECT_TYPE", 1116: "WASM_TABLE_OBJECT_TYPE", } # List of known V8 maps. 
KNOWN_MAPS = { ("read_only_space", 0x02119): (170, "MetaMap"), ("read_only_space", 0x02141): (67, "NullMap"), ("read_only_space", 0x02169): (152, "StrongDescriptorArrayMap"), ("read_only_space", 0x02191): (157, "WeakFixedArrayMap"), ("read_only_space", 0x021d1): (96, "EnumCacheMap"), ("read_only_space", 0x02205): (116, "FixedArrayMap"), ("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"), ("read_only_space", 0x0229d): (167, "FreeSpaceMap"), ("read_only_space", 0x022c5): (166, "OnePointerFillerMap"), ("read_only_space", 0x022ed): (166, "TwoPointerFillerMap"), ("read_only_space", 0x02315): (67, "UninitializedMap"), ("read_only_space", 0x0238d): (67, "UndefinedMap"), ("read_only_space", 0x023d1): (66, "HeapNumberMap"), ("read_only_space", 0x02405): (67, "TheHoleMap"), ("read_only_space", 0x02465): (67, "BooleanMap"), ("read_only_space", 0x02509): (129, "ByteArrayMap"), ("read_only_space", 0x02531): (116, "FixedCOWArrayMap"), ("read_only_space", 0x02559): (117, "HashTableMap"), ("read_only_space", 0x02581): (64, "SymbolMap"), ("read_only_space", 0x025a9): (40, "OneByteStringMap"), ("read_only_space", 0x025d1): (133, "ScopeInfoMap"), ("read_only_space", 0x025f9): (175, "SharedFunctionInfoMap"), ("read_only_space", 0x02621): (160, "CodeMap"), ("read_only_space", 0x02649): (159, "CellMap"), ("read_only_space", 0x02671): (174, "GlobalPropertyCellMap"), ("read_only_space", 0x02699): (70, "ForeignMap"), ("read_only_space", 0x026c1): (158, "TransitionArrayMap"), ("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"), ("read_only_space", 0x02711): (165, "FeedbackVectorMap"), ("read_only_space", 0x0274d): (67, "ArgumentsMarkerMap"), ("read_only_space", 0x027ad): (67, "ExceptionMap"), ("read_only_space", 0x02809): (67, "TerminationExceptionMap"), ("read_only_space", 0x02871): (67, "OptimizedOutMap"), ("read_only_space", 0x028d1): (67, "StaleRegisterMap"), ("read_only_space", 0x02931): (128, "ScriptContextTableMap"), ("read_only_space", 0x02959): (126, 
"ClosureFeedbackCellArrayMap"), ("read_only_space", 0x02981): (164, "FeedbackMetadataArrayMap"), ("read_only_space", 0x029a9): (116, "ArrayListMap"), ("read_only_space", 0x029d1): (65, "BigIntMap"), ("read_only_space", 0x029f9): (127, "ObjectBoilerplateDescriptionMap"), ("read_only_space", 0x02a21): (130, "BytecodeArrayMap"), ("read_only_space", 0x02a49): (161, "CodeDataContainerMap"), ("read_only_space", 0x02a71): (162, "CoverageInfoMap"), ("read_only_space", 0x02a99): (131, "FixedDoubleArrayMap"), ("read_only_space", 0x02ac1): (119, "GlobalDictionaryMap"), ("read_only_space", 0x02ae9): (97, "ManyClosuresCellMap"), ("read_only_space", 0x02b11): (116, "ModuleInfoMap"), ("read_only_space", 0x02b39): (120, "NameDictionaryMap"), ("read_only_space", 0x02b61): (97, "NoClosuresCellMap"), ("read_only_space", 0x02b89): (121, "NumberDictionaryMap"), ("read_only_space", 0x02bb1): (97, "OneClosureCellMap"), ("read_only_space", 0x02bd9): (122, "OrderedHashMapMap"), ("read_only_space", 0x02c01): (123, "OrderedHashSetMap"), ("read_only_space", 0x02c29): (124, "OrderedNameDictionaryMap"), ("read_only_space", 0x02c51): (172, "PreparseDataMap"), ("read_only_space", 0x02c79): (173, "PropertyArrayMap"), ("read_only_space", 0x02ca1): (93, "SideEffectCallHandlerInfoMap"), ("read_only_space", 0x02cc9): (93, "SideEffectFreeCallHandlerInfoMap"), ("read_only_space", 0x02cf1): (93, "NextCallSideEffectFreeCallHandlerInfoMap"), ("read_only_space", 0x02d19): (125, "SimpleNumberDictionaryMap"), ("read_only_space", 0x02d41): (148, "SmallOrderedHashMapMap"), ("read_only_space", 0x02d69): (149, "SmallOrderedHashSetMap"), ("read_only_space", 0x02d91): (150, "SmallOrderedNameDictionaryMap"), ("read_only_space", 0x02db9): (153, "SourceTextModuleMap"), ("read_only_space", 0x02de1): (154, "SyntheticModuleMap"), ("read_only_space", 0x02e09): (71, "WasmTypeInfoMap"), ("read_only_space", 0x02e31): (182, "WeakArrayListMap"), ("read_only_space", 0x02e59): (118, "EphemeronHashTableMap"), ("read_only_space", 
0x02e81): (163, "EmbedderDataArrayMap"), ("read_only_space", 0x02ea9): (183, "WeakCellMap"), ("read_only_space", 0x02ed1): (32, "StringMap"), ("read_only_space", 0x02ef9): (41, "ConsOneByteStringMap"), ("read_only_space", 0x02f21): (33, "ConsStringMap"), ("read_only_space", 0x02f49): (37, "ThinStringMap"), ("read_only_space", 0x02f71): (35, "SlicedStringMap"), ("read_only_space", 0x02f99): (43, "SlicedOneByteStringMap"), ("read_only_space", 0x02fc1): (34, "ExternalStringMap"), ("read_only_space", 0x02fe9): (42, "ExternalOneByteStringMap"), ("read_only_space", 0x03011): (50, "UncachedExternalStringMap"), ("read_only_space", 0x03039): (0, "InternalizedStringMap"), ("read_only_space", 0x03061): (2, "ExternalInternalizedStringMap"), ("read_only_space", 0x03089): (10, "ExternalOneByteInternalizedStringMap"), ("read_only_space", 0x030b1): (18, "UncachedExternalInternalizedStringMap"), ("read_only_space", 0x030d9): (26, "UncachedExternalOneByteInternalizedStringMap"), ("read_only_space", 0x03101): (58, "UncachedExternalOneByteStringMap"), ("read_only_space", 0x03129): (67, "SelfReferenceMarkerMap"), ("read_only_space", 0x03151): (67, "BasicBlockCountersMarkerMap"), ("read_only_space", 0x03195): (87, "ArrayBoilerplateDescriptionMap"), ("read_only_space", 0x03269): (99, "InterceptorInfoMap"), ("read_only_space", 0x053e5): (72, "PromiseFulfillReactionJobTaskMap"), ("read_only_space", 0x0540d): (73, "PromiseRejectReactionJobTaskMap"), ("read_only_space", 0x05435): (74, "CallableTaskMap"), ("read_only_space", 0x0545d): (75, "CallbackTaskMap"), ("read_only_space", 0x05485): (76, "PromiseResolveThenableJobTaskMap"), ("read_only_space", 0x054ad): (79, "FunctionTemplateInfoMap"), ("read_only_space", 0x054d5): (80, "ObjectTemplateInfoMap"), ("read_only_space", 0x054fd): (81, "AccessCheckInfoMap"), ("read_only_space", 0x05525): (82, "AccessorInfoMap"), ("read_only_space", 0x0554d): (83, "AccessorPairMap"), ("read_only_space", 0x05575): (84, "AliasedArgumentsEntryMap"), 
("read_only_space", 0x0559d): (85, "AllocationMementoMap"), ("read_only_space", 0x055c5): (88, "AsmWasmDataMap"), ("read_only_space", 0x055ed): (89, "AsyncGeneratorRequestMap"), ("read_only_space", 0x05615): (90, "BreakPointMap"), ("read_only_space", 0x0563d): (91, "BreakPointInfoMap"), ("read_only_space", 0x05665): (92, "CachedTemplateObjectMap"), ("read_only_space", 0x0568d): (94, "ClassPositionsMap"), ("read_only_space", 0x056b5): (95, "DebugInfoMap"), ("read_only_space", 0x056dd): (98, "FunctionTemplateRareDataMap"), ("read_only_space", 0x05705): (100, "InterpreterDataMap"), ("read_only_space", 0x0572d): (101, "ModuleRequestMap"), ("read_only_space", 0x05755): (102, "PromiseCapabilityMap"), ("read_only_space", 0x0577d): (103, "PromiseReactionMap"), ("read_only_space", 0x057a5): (104, "PropertyDescriptorObjectMap"), ("read_only_space", 0x057cd): (105, "PrototypeInfoMap"), ("read_only_space", 0x057f5): (106, "ScriptMap"), ("read_only_space", 0x0581d): (107, "SourceTextModuleInfoEntryMap"), ("read_only_space", 0x05845): (108, "StackFrameInfoMap"), ("read_only_space", 0x0586d): (109, "StackTraceFrameMap"), ("read_only_space", 0x05895): (110, "TemplateObjectDescriptionMap"), ("read_only_space", 0x058bd): (111, "Tuple2Map"), ("read_only_space", 0x058e5): (112, "WasmExceptionTagMap"), ("read_only_space", 0x0590d): (113, "WasmExportedFunctionDataMap"), ("read_only_space", 0x05935): (114, "WasmIndirectFunctionTableMap"), ("read_only_space", 0x0595d): (115, "WasmJSFunctionDataMap"), ("read_only_space", 0x05985): (134, "SloppyArgumentsElementsMap"), ("read_only_space", 0x059ad): (151, "DescriptorArrayMap"), ("read_only_space", 0x059d5): (156, "UncompiledDataWithoutPreparseDataMap"), ("read_only_space", 0x059fd): (155, "UncompiledDataWithPreparseDataMap"), ("read_only_space", 0x05a25): (171, "OnHeapBasicBlockProfilerDataMap"), ("read_only_space", 0x05a4d): (180, "WasmCapiFunctionDataMap"), ("read_only_space", 0x05a75): (168, "InternalClassMap"), ("read_only_space", 
0x05a9d): (177, "SmiPairMap"), ("read_only_space", 0x05ac5): (176, "SmiBoxMap"), ("read_only_space", 0x05aed): (145, "ExportedSubClassBaseMap"), ("read_only_space", 0x05b15): (146, "ExportedSubClassMap"), ("read_only_space", 0x05b3d): (68, "AbstractInternalClassSubclass1Map"), ("read_only_space", 0x05b65): (69, "AbstractInternalClassSubclass2Map"), ("read_only_space", 0x05b8d): (132, "InternalClassWithSmiElementsMap"), ("read_only_space", 0x05bb5): (169, "InternalClassWithStructElementsMap"), ("read_only_space", 0x05bdd): (147, "ExportedSubClass2Map"), ("read_only_space", 0x05c05): (178, "SortStateMap"), ("read_only_space", 0x05c2d): (86, "AllocationSiteWithWeakNextMap"), ("read_only_space", 0x05c55): (86, "AllocationSiteWithoutWeakNextMap"), ("read_only_space", 0x05c7d): (77, "LoadHandler1Map"), ("read_only_space", 0x05ca5): (77, "LoadHandler2Map"), ("read_only_space", 0x05ccd): (77, "LoadHandler3Map"), ("read_only_space", 0x05cf5): (78, "StoreHandler0Map"), ("read_only_space", 0x05d1d): (78, "StoreHandler1Map"), ("read_only_space", 0x05d45): (78, "StoreHandler2Map"), ("read_only_space", 0x05d6d): (78, "StoreHandler3Map"), ("map_space", 0x02119): (1057, "ExternalMap"), ("map_space", 0x02141): (1098, "JSMessageObjectMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { ("read_only_space", 0x021b9): "EmptyWeakFixedArray", ("read_only_space", 0x021c1): "EmptyDescriptorArray", ("read_only_space", 0x021f9): "EmptyEnumCache", ("read_only_space", 0x0222d): "EmptyFixedArray", ("read_only_space", 0x02235): "NullValue", ("read_only_space", 0x0233d): "UninitializedValue", ("read_only_space", 0x023b5): "UndefinedValue", ("read_only_space", 0x023f9): "NanValue", ("read_only_space", 0x0242d): "TheHoleValue", ("read_only_space", 0x02459): "HoleNanValue", ("read_only_space", 0x0248d): "TrueValue", ("read_only_space", 0x024cd): "FalseValue", ("read_only_space", 0x024fd): "empty_string", ("read_only_space", 0x02739): "EmptyScopeInfo", ("read_only_space", 0x02775): "ArgumentsMarker", ("read_only_space", 0x027d5): "Exception", ("read_only_space", 0x02831): "TerminationException", ("read_only_space", 0x02899): "OptimizedOut", ("read_only_space", 0x028f9): "StaleRegister", ("read_only_space", 0x03179): "EmptyPropertyArray", ("read_only_space", 0x03181): "EmptyByteArray", ("read_only_space", 0x03189): "EmptyObjectBoilerplateDescription", ("read_only_space", 0x031bd): "EmptyArrayBoilerplateDescription", ("read_only_space", 0x031c9): "EmptyClosureFeedbackCellArray", ("read_only_space", 0x031d1): "EmptySlowElementDictionary", ("read_only_space", 0x031f5): "EmptyOrderedHashMap", ("read_only_space", 0x03209): "EmptyOrderedHashSet", ("read_only_space", 0x0321d): "EmptyFeedbackMetadata", ("read_only_space", 0x03229): "EmptyPropertyDictionary", ("read_only_space", 0x03251): "EmptyOrderedPropertyDictionary", ("read_only_space", 0x03291): "NoOpInterceptorInfo", ("read_only_space", 0x032b9): "EmptyWeakArrayList", ("read_only_space", 0x032c5): "InfinityValue", ("read_only_space", 0x032d1): "MinusZeroValue", ("read_only_space", 0x032dd): "MinusInfinityValue", ("read_only_space", 0x032e9): "SelfReferenceMarker", ("read_only_space", 0x03329): "BasicBlockCountersMarker", ("read_only_space", 0x0336d): "OffHeapTrampolineRelocationInfo", 
("read_only_space", 0x03379): "TrampolineTrivialCodeDataContainer", ("read_only_space", 0x03385): "TrampolinePromiseRejectionCodeDataContainer", ("read_only_space", 0x03391): "GlobalThisBindingScopeInfo", ("read_only_space", 0x033c9): "EmptyFunctionScopeInfo", ("read_only_space", 0x033f1): "NativeScopeInfo", ("read_only_space", 0x0340d): "HashSeed", ("old_space", 0x02119): "ArgumentsIteratorAccessor", ("old_space", 0x0215d): "ArrayLengthAccessor", ("old_space", 0x021a1): "BoundFunctionLengthAccessor", ("old_space", 0x021e5): "BoundFunctionNameAccessor", ("old_space", 0x02229): "ErrorStackAccessor", ("old_space", 0x0226d): "FunctionArgumentsAccessor", ("old_space", 0x022b1): "FunctionCallerAccessor", ("old_space", 0x022f5): "FunctionNameAccessor", ("old_space", 0x02339): "FunctionLengthAccessor", ("old_space", 0x0237d): "FunctionPrototypeAccessor", ("old_space", 0x023c1): "StringLengthAccessor", ("old_space",
<reponame>ZaydH/stratego # -*- coding: utf-8 -*- r""" tests.test_state ~~~~~~~~~~~~~~~~ Tests for the \p State class including movement mechanics and enumeration of the \p MoveSet class. :copyright: (c) 2019 by <NAME>. :license: MIT, see LICENSE for more details. """ from typing import Tuple import pytest from stratego import Move from stratego.location import Location from stratego.move import MoveStack from stratego.piece import Color from stratego.player import Player from stratego.state import State from testing_utils import STATES_PATH, SMALL_BRD, STD_BRD def _get_move_from_player(plyr: Player, _orig: Tuple[int, int], new: Tuple[int, int]) -> Move: r""" Get the move from (row1, col1) in \p l1 to (row2, col2) in \p l2. :param plyr: Player whose move will be extracted :param _orig: Original location to move from :param new: New location to move to :return: Move corresponding to the move pair """ available_moves = plyr.move_set.avail values = list(available_moves.values()) v = [v for v in values if v.orig == Location(*_orig) and v.new == Location(*new)] assert v return v[0] def _verify_num_pieces_and_move_set_size(state: State, num_red_p: int, num_blue_p: int, num_red_mv: int, num_blue_mv: int): r""" Verifies the number of pieces and size of the \p MoveSet :param state: State of the game :param num_red_p: Number of remaining RED pieces :param num_blue_p: Number of remaining BLUE pieces :param num_red_mv: Number of available moves for RED :param num_blue_mv: Number of available moves for BLUE """ # Standardize assert tests assert state.red.num_pieces == num_red_p assert state.blue.num_pieces == num_blue_p assert len(state.red.move_set) == num_red_mv assert len(state.blue.move_set) == num_blue_mv def test_duplicate_loc_in_state(): r""" Verify that a \p State file with two pieces in same location raises an error """ for dup_file in ["duplicate_loc_red.txt", "duplicate_loc_diff_color.txt"]: duplicate_path = STATES_PATH / dup_file assert duplicate_path.exists(), 
"Duplicate file path does not exist" with pytest.raises(Exception): State.importer(duplicate_path, STD_BRD) def test_no_flag(): r""" Verify an error is raised if the file has no flag """ # Verify the "clean" passes path = STATES_PATH / "no_flag_clean.txt" assert path.exists(), "No flag test file does not exist" State.importer(path, STD_BRD) # Verify no flag checks are done for both players for file in ["no_flag_red.txt", "no_flag_blue.txt"]: path = STATES_PATH / file assert path.exists(), "No flag test file does not exist" with pytest.raises(Exception): State.importer(path, STD_BRD) # noinspection PyProtectedMember def test_state_basic_moves(): r""" Verify the basic movement mechanics work without issue """ path = STATES_PATH / "state_move_verify.txt" assert path.exists(), "Move verify file does not exist" state = State.importer(path, STD_BRD) # Verify initial state matches expectations _verify_num_pieces_and_move_set_size(state, 7, 7, 4 + 3, 4 + 3) move_stack = MoveStack() # Define a series of moves. 
Entries in each tuple are: # 0: Original piece location # 1: Piece new location # 2: Number of red pieces # 3: Number of blue pieces # 4: Size of the red move set # 5: Size of the blue move set move_list = [((0, 1), (1, 1), 7, 7, 12, 7), ((9, 1), (8, 1), 7, 7, 12, 12), ((1, 1), (2, 1), 7, 7, 12, 12), ((8, 1), (7, 1), 7, 7, 12, 12), ((2, 1), (3, 1), 7, 7, 12, 12), ((7, 1), (6, 1), 7, 7, 12, 12), ((3, 1), (4, 1), 7, 7, 11, 12), # One less due to blocked by (4, 2) ((6, 1), (5, 1), 7, 7, 11, 11), # One less due to blocked by (5, 2) ((4, 1), (5, 1), 6, 6, 8, 8), # Both lost piece in battle ((9, 3), (6, 3), 6, 6, 8, 18), # Move blue scout ((0, 3), (3, 3), 6, 6, 18, 18), # Move red scout ((6, 3), (6, 5), 6, 6, 18, 23), # Move blue scout ((3, 3), (3, 5), 6, 6, 20, 20), # Move red scout ((6, 5), (6, 4), 6, 6, 23, 23), # Move blue scout ((3, 5), (9, 5), 6, 5, 16, 22), # Red scout attack blue spy ((6, 4), (0, 4), 6, 4, 16, 5) # Blue scout attack red bomb ] printer_out = [] for orig, new, num_red_p, num_blue_p, num_red_mv, num_blue_mv in move_list: orig, new = Location(orig[0], orig[1]), Location(new[0], new[1]) p = state.next_player.get_piece_at_loc(orig) assert p is not None attacked = state.get_other_player(state.next_player).get_piece_at_loc(new) move_stack.push(Move(p, orig, new, attacked)) assert state.update(move_stack.top()) assert state._printer._is_loc_empty(orig) _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) printer_out.append(state.write_board()) # Try to move red bomb then the red flag for orig in [Location(0, 4), Location(0, 6)]: p = state.next_player.get_piece_at_loc(orig) assert p is not None for new in [orig.left(), orig.right]: attacked = state.get_other_player(state.next_player).get_piece_at_loc(new) with pytest.raises(Exception): Move(p, orig, new, attacked) # Verify Undo for i in range(2, len(move_list) + 1): _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_list[-i] state.undo() assert 
state.write_board() == printer_out[-i], "Printer mismatch after do/undo" _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) def test_small_direct_attack(): r""" Test making a direct attack """ move_list = [(None, None, None, None, 7, 7, 11, 11), (Color.RED, Color.BLUE, (0, 3), (7, 3), 6, 6, 5, 5) ] _helper_small_test(move_list) def test_small_move_then_attack(): r""" Test making a single move with a scout then a direct attack """ move_list = [(None, None, None, None, 7, 7, 11, 11), (Color.RED, Color.BLUE, (0, 3), (1, 3), 7, 7, 19, 10), (Color.BLUE, Color.RED, (7, 3), (1, 3), 6, 6, 5, 5) ] _helper_small_test(move_list) def test_single_adjacent_scout(): r""" Test making a single move with a scout then a direct attack """ move_list = [(None, None, None, None, 2, 2, 11, 11), (Color.BLUE, Color.BLUE, (2, 4), (2, 3), 1, 1, 0, 0) ] _helper_small_test(move_list, state_file="moveset_two_scouts_adjacent.txt") def test_scout_blocking_scout(): r""" Test making a single move with a scout then a direct attack """ move_list = [(None, None, None, None, 7, 7, 11, 11), (Color.RED, Color.BLUE, (0, 5), (1, 5), 7, 7, 14, 11), (Color.BLUE, Color.RED, (7, 3), (2, 3), 7, 7, 14, 19), (Color.RED, Color.BLUE, (1, 5), (1, 4), 7, 7, 13, 19), (Color.BLUE, Color.RED, (2, 3), (3, 3), 7, 7, 13, 13), (Color.RED, Color.BLUE, (1, 4), (2, 4), 7, 7, 14, 13), (Color.BLUE, Color.RED, (7, 2), (7, 3), 7, 7, 14, 13), (Color.RED, Color.BLUE, (0, 0), (1, 0), 7, 7, 17, 13), (Color.BLUE, Color.RED, (3, 3), (2, 3), 7, 7, 17, 16), (Color.RED, Color.BLUE, (2, 4), (3, 4), 7, 7, 16, 19), (Color.BLUE, Color.RED, (2, 3), (2, 4), 7, 7, 16, 16), (Color.RED, Color.BLUE, (1, 0), (2, 0), 7, 7, 16, 16), (Color.BLUE, Color.RED, (2, 4), (2, 1), 7, 7, 11, 19), (Color.RED, Color.BLUE, (0, 2), (1, 2), 7, 7, 16, 19), (Color.BLUE, Color.RED, (7, 5), (6, 5), 7, 7, 16, 22), (Color.RED, Color.BLUE, (2, 0), (2, 1), 7, 6, 16, 9) ] _helper_small_test(move_list, 
state_file="moveset_scout_block_scout.txt") # noinspection PyProtectedMember def _helper_small_test(move_info, state_file: str = "moveset_small_direct_attack.txt"): r""" Helper function for testing the movements on the small board :param move_info: List of move information. For :math:`n` moves, the length of \p move_info should be :math:`n+1`. The first element is the initial board configuration. """ path = STATES_PATH / state_file assert path.exists(), "Small direct attack state file not found" state = State.importer(path, SMALL_BRD) _, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[0] _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) # Test doing moves moves, brd = [], [state.write_board()] for col, other_col, l1, l2, num_red_p, num_blue_p, num_red_mv, num_blue_mv in move_info[1:]: plyr, _ = state.get_player(col), state.get_player(other_col) m = _get_move_from_player(plyr, l1, l2) moves.append(m) state.update(moves[-1]) brd.append(state.write_board()) _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) # Test undoing the moves for i in range(1, len(moves) - 1): assert brd[-i] == state.write_board() _, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[-i] _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) assert moves[-i] == state._stack.top() # pylint: disable=protected-access state.rollback() _, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[-i - 1] _verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv) assert brd[-i - 1] == state.write_board() def test_if_has_move(): r""" Verify the \p piece_has_move method of the \p State class """ path = STATES_PATH / "deep_q_verify.txt" state = State.importer(path, STD_BRD) # Marshall cannot move marshall_loc = Location(0, 0) p = state.get_player(Color.RED).get_piece_at_loc(marshall_loc) assert not 
state.piece_has_move(p) # Rank3 can move rank3_loc = Location(1, 0) p = state.get_player(Color.RED).get_piece_at_loc(rank3_loc) assert state.piece_has_move(p) # Bomb cannot move bomb_loc = Location(0, 4) p = state.get_player(Color.RED).get_piece_at_loc(bomb_loc) assert not state.piece_has_move(p) # Flag cannot move flag_loc = Location(0, 6) p = state.get_player(Color.RED).get_piece_at_loc(flag_loc) assert not state.piece_has_move(p) # verify pieces with
""" Structure & PKCS11-specific definitions. """ from ctypes import CFUNCTYPE, Structure from pycryptoki.cryptoki.c_defs import * from pycryptoki.cryptoki.helpers import struct_def # values for unnamed enumeration CK_MECHANISM_TYPE = CK_ULONG CK_MECHANISM_TYPE_PTR = POINTER(CK_MECHANISM_TYPE) CK_USER_TYPE = CK_ULONG CK_SESSION_HANDLE = CK_ULONG CK_SESSION_HANDLE_PTR = POINTER(CK_SESSION_HANDLE) CK_OBJECT_HANDLE = CK_ULONG CK_OBJECT_HANDLE_PTR = POINTER(CK_OBJECT_HANDLE) CK_STATE = CK_ULONG CK_OBJECT_CLASS = CK_ULONG CK_OBJECT_CLASS_PTR = POINTER(CK_OBJECT_CLASS) CK_HW_FEATURE_TYPE = CK_ULONG CK_KEY_TYPE = CK_ULONG CK_CERTIFICATE_TYPE = CK_ULONG CK_ATTRIBUTE_TYPE = CK_ULONG class CK_MECHANISM(Structure): pass class CK_ATTRIBUTE(Structure): pass CK_MECHANISM_PTR = POINTER(CK_MECHANISM) CK_ATTRIBUTE_PTR = POINTER(CK_ATTRIBUTE) class CK_AES_GCM_PARAMS(Structure): pass struct_def( CK_AES_GCM_PARAMS, [ ("pIv", CK_BYTE_PTR), ("ulIvLen", CK_ULONG), ("ulIvBits", CK_ULONG), ("pAAD", CK_BYTE_PTR), ("ulAADLen", CK_ULONG), ("ulTagBits", CK_ULONG), ], ) CK_AES_GCM_PARAMS_PTR = CK_AES_GCM_PARAMS class CK_XOR_BASE_DATA_KDF_PARAMS(Structure): pass CK_EC_KDF_TYPE = CK_ULONG struct_def( CK_XOR_BASE_DATA_KDF_PARAMS, [("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR)], ) CK_XOR_BASE_DATA_KDF_PARAMS_PTR = POINTER(CK_XOR_BASE_DATA_KDF_PARAMS) class CK_AES_XTS_PARAMS(Structure): pass struct_def(CK_AES_XTS_PARAMS, [("hTweakKey", CK_OBJECT_HANDLE), ("cb", CK_BYTE * 16)]) CK_AES_XTS_PARAMS_PTR = POINTER(CK_AES_XTS_PARAMS) CK_EC_DH_PRIMITIVE = CK_ULONG CK_EC_ENC_SCHEME = CK_ULONG CK_EC_MAC_SCHEME = CK_ULONG class CK_ECIES_PARAMS(Structure): pass struct_def( CK_ECIES_PARAMS, [ ("dhPrimitive", CK_EC_DH_PRIMITIVE), ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen1", CK_ULONG), ("pSharedData1", CK_BYTE_PTR), ("encScheme", CK_EC_ENC_SCHEME), ("ulEncKeyLenInBits", CK_ULONG), ("macScheme", CK_EC_MAC_SCHEME), ("ulMacKeyLenInBits", CK_ULONG), ("ulMacLenInBits", CK_ULONG), 
("ulSharedDataLen2", CK_ULONG), ("pSharedData2", CK_BYTE_PTR), ], ) CK_ECIES_PARAMS_PTR = POINTER(CK_ECIES_PARAMS) CK_KDF_PRF_TYPE = CK_ULONG CK_KDF_PRF_ENCODING_SCHEME = CK_ULONG class CK_KDF_PRF_PARAMS(Structure): pass struct_def( CK_KDF_PRF_PARAMS, [ ("prfType", CK_KDF_PRF_TYPE), ("pLabel", CK_BYTE_PTR), ("ulLabelLen", CK_ULONG), ("pContext", CK_BYTE_PTR), ("ulContextLen", CK_ULONG), ("ulCounter", CK_ULONG), ("ulEncodingScheme", CK_KDF_PRF_ENCODING_SCHEME), ], ) CK_PRF_KDF_PARAMS = CK_KDF_PRF_PARAMS CK_KDF_PRF_PARAMS_PTR = POINTER(CK_PRF_KDF_PARAMS) class CK_AES_CTR_PARAMS(Structure): pass CK_SEED_CTR_PARAMS = CK_AES_CTR_PARAMS CK_SEED_CTR_PARAMS_PTR = POINTER(CK_SEED_CTR_PARAMS) CK_ARIA_CTR_PARAMS = CK_AES_CTR_PARAMS CK_ARIA_CTR_PARAMS_PTR = POINTER(CK_ARIA_CTR_PARAMS) class CK_DES_CTR_PARAMS(Structure): pass struct_def(CK_DES_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 8)]) CK_DES_CTR_PARAMS_PTR = POINTER(CK_DES_CTR_PARAMS) CK_AES_GMAC_PARAMS = CK_AES_GCM_PARAMS CK_AES_GMAC_PARAMS_PTR = POINTER(CK_AES_GMAC_PARAMS) class HSM_STATS_PARAMS(Structure): pass struct_def( HSM_STATS_PARAMS, [("ulId", CK_ULONG), ("ulHighValue", CK_ULONG), ("ulLowValue", CK_ULONG)] ) class CA_ROLE_STATE(Structure): pass struct_def( CA_ROLE_STATE, [ ("flags", CK_BYTE), ("loginAttemptsLeft", CK_BYTE), ("primaryAuthMech", CK_BYTE), ("secondaryAuthMech", CK_BYTE), ], ) class CK_POLICY_INFO(Structure): pass struct_def( CK_POLICY_INFO, [ ("ulId", CK_ULONG), ("ulValue", CK_ULONG), ("ulOffToOnDestructive", CK_ULONG), ("ulOnToOffDestructive", CK_ULONG), ], ) CK_POLICY_INFO_PTR = POINTER(CK_POLICY_INFO) class CA_MOFN_GENERATION(Structure): pass struct_def( CA_MOFN_GENERATION, [("ulWeight", CK_ULONG), ("pVector", CK_BYTE_PTR), ("ulVectorLen", CK_ULONG)], ) CA_MOFN_GENERATION_PTR = POINTER(CA_MOFN_GENERATION) class CA_MOFN_ACTIVATION(Structure): pass struct_def(CA_MOFN_ACTIVATION, [("pVector", CK_BYTE_PTR), ("ulVectorLen", CK_ULONG)]) CA_MOFN_ACTIVATION_PTR = 
POINTER(CA_MOFN_ACTIVATION) class CA_M_OF_N_STATUS(Structure): pass struct_def( CA_M_OF_N_STATUS, [ ("ulID", CK_ULONG), ("ulM", CK_ULONG), ("ulN", CK_ULONG), ("ulSecretSize", CK_ULONG), ("ulFlag", CK_ULONG), ], ) CA_MOFN_STATUS = CA_M_OF_N_STATUS CA_MOFN_STATUS_PTR = POINTER(CA_MOFN_STATUS) CKCA_MODULE_ID = CK_ULONG CKCA_MODULE_ID_PTR = POINTER(CKCA_MODULE_ID) class CKCA_MODULE_INFO(Structure): pass class CK_VERSION(Structure): pass struct_def(CK_VERSION, [("major", CK_BYTE), ("minor", CK_BYTE)]) struct_def( CKCA_MODULE_INFO, [ ("ulModuleSize", CK_ULONG), ("developerName", CK_CHAR * 32), ("moduleDescription", CK_CHAR * 32), ("moduleVersion", CK_VERSION), ], ) CKCA_MODULE_INFO_PTR = POINTER(CKCA_MODULE_INFO) class CK_HA_MEMBER(Structure): pass struct_def(CK_HA_MEMBER, [("memberSerial", CK_CHAR * 20), ("memberStatus", CK_RV)]) class CK_HA_STATUS(Structure): pass struct_def( CK_HA_STATUS, [("groupSerial", CK_CHAR * 20), ("memberList", CK_HA_MEMBER * 32), ("listSize", CK_ULONG)], ) CK_HA_MEMBER_PTR = POINTER(CK_HA_MEMBER) CK_HA_STATE_PTR = POINTER(CK_HA_STATUS) CKA_SIM_AUTH_FORM = CK_ULONG class CT_Token(Structure): pass struct_def(CT_Token, []) CT_TokenHndle = POINTER(CT_Token) class CK_AES_CBC_PAD_EXTRACT_PARAMS(Structure): pass struct_def( CK_AES_CBC_PAD_EXTRACT_PARAMS, [ ("ulType", CK_ULONG), ("ulHandle", CK_ULONG), ("ulDeleteAfterExtract", CK_ULONG), ("pBuffer", CK_BYTE_PTR), ("pulBufferLen", CK_ULONG_PTR), ("ulStorage", CK_ULONG), ("pedId", CK_ULONG), ("pbFileName", CK_BYTE_PTR), ("ctxID", CK_ULONG), ], ) CK_AES_CBC_PAD_EXTRACT_PARAMS_PTR = POINTER(CK_AES_CBC_PAD_EXTRACT_PARAMS) class CK_AES_CBC_PAD_INSERT_PARAMS(Structure): pass struct_def( CK_AES_CBC_PAD_INSERT_PARAMS, [ ("ulStorageType", CK_ULONG), ("ulContainerState", CK_ULONG), ("pBuffer", CK_BYTE_PTR), ("ulBufferLen", CK_ULONG), ("pulType", CK_ULONG_PTR), ("pulHandle", CK_ULONG_PTR), ("ulStorage", CK_ULONG), ("pedId", CK_ULONG), ("pbFileName", CK_BYTE_PTR), ("ctxID", CK_ULONG), ("ulContainerNumber", 
CK_ULONG), ], ) CK_AES_CBC_PAD_INSERT_PARAMS_PTR = POINTER(CK_AES_CBC_PAD_INSERT_PARAMS) class CK_CLUSTER_STATE(Structure): pass struct_def(CK_CLUSTER_STATE, [("bMembers", CK_BYTE * 32 * 8), ("ulMemberStatus", CK_ULONG * 8)]) CK_CLUSTER_STATE_PTR = POINTER(CK_CLUSTER_STATE) class CK_LKM_TOKEN_ID_S(Structure): pass struct_def(CK_LKM_TOKEN_ID_S, [("id", CK_BYTE * 20)]) CK_LKM_TOKEN_ID = CK_LKM_TOKEN_ID_S CK_LKM_TOKEN_ID_PTR = POINTER(CK_LKM_TOKEN_ID) class CK_UTILIZATION_COUNTER(Structure): pass struct_def( CK_UTILIZATION_COUNTER, [ ("ullSerialNumber", CK_ULONGLONG), ("label", CK_CHAR * 66), ("ulBindId", CK_ULONG), ("ulCounterId", CK_ULONG), ("ullCount", CK_ULONGLONG), ], ) CK_UTILIZATION_COUNTER_PTR = POINTER(CK_UTILIZATION_COUNTER) # pka class CK_KEY_STATUS(Structure): pass struct_def( CK_KEY_STATUS, [ ("flags", CK_BYTE), ("failedAuthCountLimit", CK_BYTE), ("reserved1", CK_BYTE), ("reserved2", CK_BYTE), ], ) class CK_FUNCTION_LIST(Structure): pass class CK_INFO(Structure): pass CK_INFO_PTR = POINTER(CK_INFO) class CK_SLOT_INFO(Structure): pass CK_SLOT_INFO_PTR = POINTER(CK_SLOT_INFO) class CK_TOKEN_INFO(Structure): pass CK_TOKEN_INFO_PTR = POINTER(CK_TOKEN_INFO) class CK_MECHANISM_INFO(Structure): pass CK_MECHANISM_INFO_PTR = POINTER(CK_MECHANISM_INFO) class CK_SESSION_INFO(Structure): pass CK_SESSION_INFO_PTR = POINTER(CK_SESSION_INFO) CK_VERSION_PTR = POINTER(CK_VERSION) struct_def( CK_INFO, [ ("cryptokiVersion", CK_VERSION), ("manufacturerID", CK_UTF8CHAR * 32), ("flags", CK_FLAGS), ("libraryDescription", CK_UTF8CHAR * 32), ("libraryVersion", CK_VERSION), ], ) struct_def( CK_SLOT_INFO, [ ("slotDescription", CK_UTF8CHAR * 64), ("manufacturerID", CK_UTF8CHAR * 32), ("flags", CK_FLAGS), ("hardwareVersion", CK_VERSION), ("firmwareVersion", CK_VERSION), ], ) struct_def( CK_TOKEN_INFO, [ ("label", CK_UTF8CHAR * 32), ("manufacturerID", CK_UTF8CHAR * 32), ("model", CK_UTF8CHAR * 16), ("serialNumber", CK_CHAR * 16), ("flags", CK_FLAGS), ("usMaxSessionCount", CK_ULONG), 
("usSessionCount", CK_ULONG), ("usMaxRwSessionCount", CK_ULONG), ("usRwSessionCount", CK_ULONG), ("usMaxPinLen", CK_ULONG), ("usMinPinLen", CK_ULONG), ("ulTotalPublicMemory", CK_ULONG), ("ulFreePublicMemory", CK_ULONG), ("ulTotalPrivateMemory", CK_ULONG), ("ulFreePrivateMemory", CK_ULONG), ("hardwareVersion", CK_VERSION), ("firmwareVersion", CK_VERSION), ("utcTime", CK_CHAR * 16), ], ) struct_def( CK_SESSION_INFO, [("slotID", CK_SLOT_ID), ("state", CK_STATE), ("flags", CK_FLAGS), ("usDeviceError", CK_ULONG)], ) struct_def( CK_ATTRIBUTE, [("type", CK_ATTRIBUTE_TYPE), ("pValue", CK_VOID_PTR), ("usValueLen", CK_ULONG)] ) class CK_DATE(Structure): pass struct_def(CK_DATE, [("year", CK_CHAR * 4), ("month", CK_CHAR * 2), ("day", CK_CHAR * 2)]) struct_def( CK_MECHANISM, [("mechanism", CK_MECHANISM_TYPE), ("pParameter", CK_VOID_PTR), ("usParameterLen", CK_ULONG)], ) struct_def( CK_MECHANISM_INFO, [("ulMinKeySize", CK_ULONG), ("ulMaxKeySize", CK_ULONG), ("flags", CK_FLAGS)] ) CK_CREATEMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR_PTR) CK_DESTROYMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR) CK_LOCKMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR) CK_UNLOCKMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR) class CK_C_INITIALIZE_ARGS(Structure): pass struct_def( CK_C_INITIALIZE_ARGS, [ ("CreateMutex", CK_CREATEMUTEX), ("DestroyMutex", CK_DESTROYMUTEX), ("LockMutex", CK_LOCKMUTEX), ("UnlockMutex", CK_UNLOCKMUTEX), ("flags", CK_FLAGS), ("pReserved", CK_VOID_PTR), ], ) CK_C_INITIALIZE_ARGS_PTR = POINTER(CK_C_INITIALIZE_ARGS) CK_RSA_PKCS_MGF_TYPE = CK_ULONG CK_RSA_PKCS_MGF_TYPE_PTR = POINTER(CK_RSA_PKCS_MGF_TYPE) CK_RSA_PKCS_OAEP_SOURCE_TYPE = CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR = POINTER(CK_RSA_PKCS_OAEP_SOURCE_TYPE) class CK_RSA_PKCS_OAEP_PARAMS(Structure): pass struct_def( CK_RSA_PKCS_OAEP_PARAMS, [ ("hashAlg", CK_MECHANISM_TYPE), ("mgf", CK_RSA_PKCS_MGF_TYPE), ("source", CK_RSA_PKCS_OAEP_SOURCE_TYPE), ("pSourceData", CK_VOID_PTR), ("ulSourceDataLen", CK_ULONG), ], ) CK_RSA_PKCS_OAEP_PARAMS_PTR = 
POINTER(CK_RSA_PKCS_OAEP_PARAMS) class CK_RSA_PKCS_PSS_PARAMS(Structure): pass struct_def( CK_RSA_PKCS_PSS_PARAMS, [("hashAlg", CK_MECHANISM_TYPE), ("mgf", CK_RSA_PKCS_MGF_TYPE), ("usSaltLen", CK_ULONG)], ) CK_RSA_PKCS_PSS_PARAMS_PTR = POINTER(CK_RSA_PKCS_PSS_PARAMS) class CK_ECDH1_DERIVE_PARAMS(Structure): pass struct_def( CK_ECDH1_DERIVE_PARAMS, [ ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ], ) CK_ECDH1_DERIVE_PARAMS_PTR = POINTER(CK_ECDH1_DERIVE_PARAMS) class CK_ECDH2_DERIVE_PARAMS(Structure): pass struct_def( CK_ECDH2_DERIVE_PARAMS, [ ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ], ) CK_ECDH2_DERIVE_PARAMS_PTR = POINTER(CK_ECDH2_DERIVE_PARAMS) class CK_ECMQV_DERIVE_PARAMS(Structure): pass struct_def( CK_ECMQV_DERIVE_PARAMS, [ ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ("publicKey", CK_OBJECT_HANDLE), ], ) CK_ECMQV_DERIVE_PARAMS_PTR = POINTER(CK_ECMQV_DERIVE_PARAMS) CK_X9_42_DH_KDF_TYPE = CK_ULONG CK_X9_42_DH_KDF_TYPE_PTR = POINTER(CK_X9_42_DH_KDF_TYPE) class CK_X9_42_DH1_DERIVE_PARAMS(Structure): pass struct_def( CK_X9_42_DH1_DERIVE_PARAMS, [ ("kdf", CK_X9_42_DH_KDF_TYPE), ("ulOtherInfoLen", CK_ULONG), ("pOtherInfo", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ], ) CK_X9_42_DH1_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_DH1_DERIVE_PARAMS) class CK_X9_42_DH2_DERIVE_PARAMS(Structure): pass struct_def( CK_X9_42_DH2_DERIVE_PARAMS, [ ("kdf", CK_X9_42_DH_KDF_TYPE), 
("ulOtherInfoLen", CK_ULONG), ("pOtherInfo", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ], ) CK_X9_42_DH2_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_DH2_DERIVE_PARAMS) class CK_X9_42_MQV_DERIVE_PARAMS(Structure): pass struct_def( CK_X9_42_MQV_DERIVE_PARAMS, [ ("kdf", CK_X9_42_DH_KDF_TYPE), ("ulOtherInfoLen", CK_ULONG), ("pOtherInfo", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ("publicKey", CK_OBJECT_HANDLE), ], ) CK_X9_42_MQV_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_MQV_DERIVE_PARAMS) class CK_KEA_DERIVE_PARAMS(Structure): pass struct_def( CK_KEA_DERIVE_PARAMS, [ ("isSender", CK_BBOOL), ("ulRandomLen", CK_ULONG), ("pRandomA", CK_BYTE_PTR), ("pRandomB", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ], ) CK_KEA_DERIVE_PARAMS_PTR = POINTER(CK_KEA_DERIVE_PARAMS) CK_RC2_PARAMS = CK_ULONG CK_RC2_PARAMS_PTR = POINTER(CK_RC2_PARAMS) class CK_RC2_CBC_PARAMS(Structure): pass struct_def(CK_RC2_CBC_PARAMS, [("usEffectiveBits", CK_ULONG), ("iv", CK_BYTE * 8)]) CK_RC2_CBC_PARAMS_PTR = POINTER(CK_RC2_CBC_PARAMS) class CK_RC2_MAC_GENERAL_PARAMS(Structure): pass struct_def(CK_RC2_MAC_GENERAL_PARAMS, [("usEffectiveBits", CK_ULONG), ("ulMacLength", CK_ULONG)]) CK_RC2_MAC_GENERAL_PARAMS_PTR = POINTER(CK_RC2_MAC_GENERAL_PARAMS) class CK_RC5_PARAMS(Structure): pass struct_def(CK_RC5_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG)]) CK_RC5_PARAMS_PTR = POINTER(CK_RC5_PARAMS) class CK_RC5_CBC_PARAMS(Structure): pass struct_def( CK_RC5_CBC_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG), ("pIv", CK_BYTE_PTR), ("ulIvLen", CK_ULONG)], ) CK_RC5_CBC_PARAMS_PTR = POINTER(CK_RC5_CBC_PARAMS) class CK_RC5_MAC_GENERAL_PARAMS(Structure): 
pass struct_def( CK_RC5_MAC_GENERAL_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG), ("ulMacLength", CK_ULONG)], ) CK_RC5_MAC_GENERAL_PARAMS_PTR = POINTER(CK_RC5_MAC_GENERAL_PARAMS) CK_MAC_GENERAL_PARAMS = CK_ULONG CK_MAC_GENERAL_PARAMS_PTR = POINTER(CK_MAC_GENERAL_PARAMS) class CK_DES_CBC_ENCRYPT_DATA_PARAMS(Structure): pass struct_def( CK_DES_CBC_ENCRYPT_DATA_PARAMS, [("iv", CK_BYTE * 8), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)], ) CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_DES_CBC_ENCRYPT_DATA_PARAMS) class CK_AES_CBC_ENCRYPT_DATA_PARAMS(Structure): pass struct_def( CK_AES_CBC_ENCRYPT_DATA_PARAMS, [("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)], ) CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_AES_CBC_ENCRYPT_DATA_PARAMS) class CK_SKIPJACK_PRIVATE_WRAP_PARAMS(Structure): pass struct_def( CK_SKIPJACK_PRIVATE_WRAP_PARAMS, [ ("usPasswordLen", CK_ULONG), ("pPassword", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPAndGLen", CK_ULONG), ("ulQLen", CK_ULONG), ("ulRandomLen", CK_ULONG), ("pRandomA", CK_BYTE_PTR), ("pPrimeP", CK_BYTE_PTR), ("pBaseG", CK_BYTE_PTR), ("pSubprimeQ", CK_BYTE_PTR), ], ) CK_SKIPJACK_PRIVATE_WRAP_PTR = POINTER(CK_SKIPJACK_PRIVATE_WRAP_PARAMS) class CK_SKIPJACK_RELAYX_PARAMS(Structure): pass struct_def( CK_SKIPJACK_RELAYX_PARAMS, [ ("ulOldWrappedXLen", CK_ULONG), ("pOldWrappedX", CK_BYTE_PTR), ("ulOldPasswordLen", CK_ULONG), ("pOldPassword", CK_BYTE_PTR), ("ulOldPublicDataLen", CK_ULONG), ("pOldPublicData", CK_BYTE_PTR), ("ulOldRandomLen", CK_ULONG), ("pOldRandomA", CK_BYTE_PTR), ("ulNewPasswordLen", CK_ULONG), ("pNewPassword", CK_BYTE_PTR), ("ulNewPublicDataLen", CK_ULONG), ("pNewPublicData", CK_BYTE_PTR), ("ulNewRandomLen", CK_ULONG), ("pNewRandomA", CK_BYTE_PTR), ], ) CK_SKIPJACK_RELAYX_PARAMS_PTR = POINTER(CK_SKIPJACK_RELAYX_PARAMS) class CK_PBE_PARAMS(Structure): pass struct_def( CK_PBE_PARAMS, [ ("pInitVector", CK_BYTE_PTR), ("pPassword", CK_UTF8CHAR_PTR), 
("usPasswordLen", CK_ULONG), ("pSalt", CK_BYTE_PTR), ("usSaltLen", CK_ULONG), ("usIteration", CK_ULONG), ], ) CK_PBE_PARAMS_PTR = POINTER(CK_PBE_PARAMS) class CK_KEY_WRAP_SET_OAEP_PARAMS(Structure): pass struct_def( CK_KEY_WRAP_SET_OAEP_PARAMS, [("bBC", CK_BYTE), ("pX", CK_BYTE_PTR), ("ulXLen", CK_ULONG)] ) CK_KEY_WRAP_SET_OAEP_PARAMS_PTR = POINTER(CK_KEY_WRAP_SET_OAEP_PARAMS) class CK_SSL3_RANDOM_DATA(Structure): pass struct_def( CK_SSL3_RANDOM_DATA, [ ("pClientRandom", CK_BYTE_PTR), ("ulClientRandomLen", CK_ULONG), ("pServerRandom", CK_BYTE_PTR), ("ulServerRandomLen", CK_ULONG), ], ) class CK_SSL3_MASTER_KEY_DERIVE_PARAMS(Structure): pass struct_def( CK_SSL3_MASTER_KEY_DERIVE_PARAMS, [("RandomInfo", CK_SSL3_RANDOM_DATA), ("pVersion", CK_VERSION_PTR)], ) CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR = POINTER(CK_SSL3_MASTER_KEY_DERIVE_PARAMS) class CK_SSL3_KEY_MAT_OUT(Structure): pass struct_def( CK_SSL3_KEY_MAT_OUT, [ ("hClientMacSecret", CK_OBJECT_HANDLE), ("hServerMacSecret", CK_OBJECT_HANDLE), ("hClientKey", CK_OBJECT_HANDLE), ("hServerKey", CK_OBJECT_HANDLE), ("pIVClient", CK_BYTE_PTR), ("pIVServer", CK_BYTE_PTR), ], ) CK_SSL3_KEY_MAT_OUT_PTR = POINTER(CK_SSL3_KEY_MAT_OUT) class CK_SSL3_KEY_MAT_PARAMS(Structure): pass struct_def( CK_SSL3_KEY_MAT_PARAMS, [ ("ulMacSizeInBits", CK_ULONG), ("ulKeySizeInBits", CK_ULONG), ("ulIVSizeInBits", CK_ULONG), ("bIsExport", CK_BBOOL), ("RandomInfo", CK_SSL3_RANDOM_DATA), ("pReturnedKeyMaterial", CK_SSL3_KEY_MAT_OUT_PTR), ], ) CK_SSL3_KEY_MAT_PARAMS_PTR = POINTER(CK_SSL3_KEY_MAT_PARAMS) class CK_TLS_PRF_PARAMS(Structure): pass struct_def( CK_TLS_PRF_PARAMS, [ ("pSeed", CK_BYTE_PTR), ("ulSeedLen", CK_ULONG), ("pLabel", CK_BYTE_PTR), ("ulLabelLen", CK_ULONG), ("pOutput", CK_BYTE_PTR), ("pulOutputLen", CK_ULONG_PTR), ], ) CK_TLS_PRF_PARAMS_PTR = POINTER(CK_TLS_PRF_PARAMS) class CK_WTLS_RANDOM_DATA(Structure): pass struct_def( CK_WTLS_RANDOM_DATA, [ ("pClientRandom", CK_BYTE_PTR), ("ulClientRandomLen", CK_ULONG), ("pServerRandom", 
CK_BYTE_PTR), ("ulServerRandomLen", CK_ULONG), ], ) CK_WTLS_RANDOM_DATA_PTR = POINTER(CK_WTLS_RANDOM_DATA) class CK_WTLS_MASTER_KEY_DERIVE_PARAMS(Structure): pass struct_def( CK_WTLS_MASTER_KEY_DERIVE_PARAMS, [ ("DigestMechanism", CK_MECHANISM_TYPE), ("RandomInfo", CK_WTLS_RANDOM_DATA), ("pVersion", CK_BYTE_PTR), ], ) CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR = POINTER(CK_WTLS_MASTER_KEY_DERIVE_PARAMS) class CK_WTLS_PRF_PARAMS(Structure): pass struct_def( CK_WTLS_PRF_PARAMS, [ ("DigestMechanism", CK_MECHANISM_TYPE), ("pSeed", CK_BYTE_PTR), ("ulSeedLen", CK_ULONG), ("pLabel", CK_BYTE_PTR), ("ulLabelLen", CK_ULONG), ("pOutput", CK_BYTE_PTR), ("pulOutputLen", CK_ULONG_PTR), ], ) CK_WTLS_PRF_PARAMS_PTR = POINTER(CK_WTLS_PRF_PARAMS) class CK_WTLS_KEY_MAT_OUT(Structure): pass struct_def( CK_WTLS_KEY_MAT_OUT, [("hMacSecret", CK_OBJECT_HANDLE), ("hKey", CK_OBJECT_HANDLE), ("pIV", CK_BYTE_PTR)], ) CK_WTLS_KEY_MAT_OUT_PTR = POINTER(CK_WTLS_KEY_MAT_OUT) class CK_WTLS_KEY_MAT_PARAMS(Structure): pass struct_def( CK_WTLS_KEY_MAT_PARAMS, [ ("DigestMechanism", CK_MECHANISM_TYPE), ("ulMacSizeInBits", CK_ULONG), ("ulKeySizeInBits", CK_ULONG), ("ulIVSizeInBits", CK_ULONG), ("ulSequenceNumber", CK_ULONG), ("bIsExport", CK_BBOOL), ("RandomInfo", CK_WTLS_RANDOM_DATA), ("pReturnedKeyMaterial", CK_WTLS_KEY_MAT_OUT_PTR), ], ) CK_WTLS_KEY_MAT_PARAMS_PTR = POINTER(CK_WTLS_KEY_MAT_PARAMS) class CK_CMS_SIG_PARAMS(Structure): pass struct_def( CK_CMS_SIG_PARAMS, [ ("certificateHandle", CK_OBJECT_HANDLE), ("pSigningMechanism", CK_MECHANISM_PTR), ("pDigestMechanism", CK_MECHANISM_PTR), ("pContentType", CK_UTF8CHAR_PTR), ("pRequestedAttributes", CK_BYTE_PTR), ("ulRequestedAttributesLen", CK_ULONG), ("pRequiredAttributes", CK_BYTE_PTR), ("ulRequiredAttributesLen", CK_ULONG), ], ) CK_CMS_SIG_PARAMS_PTR = POINTER(CK_CMS_SIG_PARAMS) class CK_KEY_DERIVATION_STRING_DATA(Structure): pass struct_def(CK_KEY_DERIVATION_STRING_DATA, [("pData", CK_BYTE_PTR), ("ulLen", CK_ULONG)]) CK_KEY_DERIVATION_STRING_DATA_PTR = 
POINTER(CK_KEY_DERIVATION_STRING_DATA) CK_EXTRACT_PARAMS = CK_ULONG CK_EXTRACT_PARAMS_PTR = POINTER(CK_EXTRACT_PARAMS) CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE = CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR = POINTER(CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE) CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE = CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR = POINTER(CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE) class CK_PKCS5_PBKD2_PARAMS(Structure): pass struct_def( CK_PKCS5_PBKD2_PARAMS, [ ("saltSource", CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE), ("pSaltSourceData", CK_VOID_PTR), ("ulSaltSourceDataLen", CK_ULONG), ("iterations", CK_ULONG), ("prf", CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE), ("pPrfData", CK_VOID_PTR), ("ulPrfDataLen", CK_ULONG), ("pPassword", CK_UTF8CHAR_PTR), ("usPasswordLen", CK_ULONG), ], ) CK_PKCS5_PBKD2_PARAMS_PTR = POINTER(CK_PKCS5_PBKD2_PARAMS) CK_OTP_PARAM_TYPE = CK_ULONG CK_PARAM_TYPE = CK_OTP_PARAM_TYPE class CK_OTP_PARAM(Structure): pass struct_def( CK_OTP_PARAM, [("type", CK_OTP_PARAM_TYPE), ("pValue", CK_VOID_PTR), ("usValueLen", CK_ULONG)] ) CK_OTP_PARAM_PTR = POINTER(CK_OTP_PARAM) class CK_OTP_PARAMS(Structure): pass struct_def(CK_OTP_PARAMS, [("pParams", CK_OTP_PARAM_PTR), ("ulCount", CK_ULONG)]) CK_OTP_PARAMS_PTR = POINTER(CK_OTP_PARAMS) class CK_OTP_SIGNATURE_INFO(Structure): pass struct_def(CK_OTP_SIGNATURE_INFO, [("pParams", CK_OTP_PARAM_PTR), ("ulCount", CK_ULONG)]) CK_OTP_SIGNATURE_INFO_PTR = POINTER(CK_OTP_SIGNATURE_INFO) class CK_KIP_PARAMS(Structure): pass struct_def( CK_KIP_PARAMS, [ ("pMechanism", CK_MECHANISM_PTR), ("hKey", CK_OBJECT_HANDLE), ("pSeed", CK_BYTE_PTR), ("ulSeedLen", CK_ULONG), ], ) CK_KIP_PARAMS_PTR = POINTER(CK_KIP_PARAMS) struct_def(CK_AES_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 16)]) CK_AES_CTR_PARAMS_PTR = POINTER(CK_AES_CTR_PARAMS) class CK_CAMELLIA_CTR_PARAMS(Structure): pass struct_def(CK_CAMELLIA_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 16)]) CK_CAMELLIA_CTR_PARAMS_PTR = 
POINTER(CK_CAMELLIA_CTR_PARAMS) class
'''
Parse the Versa Literate (Markdown) serialization of Versa.

Proper entry point of use is versa.serial.literate

see: doc/literate_format.md
'''

import re
import itertools

import markdown
from amara3 import iri  # for absolutize & matches_uri_syntax
from amara3.uxml import html5
from amara3.uxml.tree import treebuilder, element, text
from amara3.uxml.treeutil import *

from versa.contrib import mkdcomments
from versa import I, VERSA_BASEIRI
from versa.contrib.datachefids import idgen

# Temp until amara3-xml fix to add comment.xml_name
# from amara3.uxml.tree import comment

# Markers for the interpreted type of a parsed property value:
# quoted text, an IRI resource reference, or an unadorned (ambiguous) value.
TEXT_VAL, RES_VAL, UNKNOWN_VAL = 1, 2, 3

TYPE_REL = VERSA_BASEIRI('type')

# IRI ref candidate: an angle-bracket-delimited token, e.g. <http://example.org>
# NOTE: regex literals below are raw strings; the string *values* are unchanged
# from the legacy non-raw forms, but raw literals avoid the invalid-escape
# (e.g. '\s') deprecation warnings on modern Python.
IRIREF_CAND_PAT = re.compile(r'<(.+)?>')

# Matches one "property: value" pair from a Markdown list item. The property is
# either an explicit <IRI> or a bare name; the value is an <IRI>, a "double-" or
# 'single-quoted' string, or free text.
# Does not support the empty URL <> as a property name
# REL_PAT = re.compile(r'''((<(.+)>)|([@\-_\w#/]+)):\s*((<(.+)>)|("(.*?)")|('(.*?)')|(.*))''', re.DOTALL)
REL_PAT = re.compile(r'''((<(.+)>)|([@\-_\w#/]+)):\s*((<(.+)>)|("(.*)")|('(.*)')|(.*))''', re.DOTALL)

# URI_ABBR_PAT = re.compile(r'@([\-_\w]+)([#/@])(.+)', re.DOTALL)

URI_EXPLICIT_PAT = re.compile(r'<(.+)>', re.DOTALL)

# Optional resource ID optionally followed by a bracketed [resource type];
# see the doctest examples that accompany RESOURCE_PAT below.
# Does not support the empty URL <> as a property name
RESOURCE_STR = r'([^\s\[\]]+)?\s?(\[([^\s\[\]]*?)\])?'
RESOURCE_PAT = re.compile(RESOURCE_STR) AB_RESOURCE_PAT = re.compile('<\s*' + RESOURCE_STR + '\s*>') HEADER_PAT = re.compile('h\\d') ''' >>> import re >>> RESOURCE_PAT = re.compile('([^\s\\[\\]]+)?\s?(\\[([^\s\\[\\]]*?)\\])?') >>> m = RESOURCE_PAT.match("ResourceID") >>> m.groups() ('ResourceID', None, None) >>> m = RESOURCE_PAT.match("[ResourceType]") >>> m.groups() (None, '[ResourceType]', 'ResourceType') >>> m = RESOURCE_PAT.match("ResourceID [ResourceType]") >>> m.groups() ('ResourceID', '[ResourceType]', 'ResourceType') >>> m = RESOURCE_PAT.match("[]") >>> m.groups() (None, '[]', '') ''' def handle_resourcelist(ltext, **kwargs): ''' A helper that converts lists of resources from a textual format such as Markdown, including absolutizing relative IRIs ''' base = kwargs.get('base', VERSA_BASEIRI) model = kwargs.get('model') iris = ltext.strip().split() newlist = model.generate_resource() for i in iris: model.add(newlist, VERSA_BASEIRI + 'item', I(iri.absolutize(i, base))) return newlist def handle_resourceset(ltext, **kwargs): ''' A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs ''' fullprop=kwargs.get('fullprop') rid=kwargs.get('rid') base=kwargs.get('base', VERSA_BASEIRI) model=kwargs.get('model') iris = ltext.strip().split() for i in iris: model.add(rid, fullprop, I(iri.absolutize(i, base))) return None PREP_METHODS = { VERSA_BASEIRI + 'text': lambda x, **kwargs: x, VERSA_BASEIRI + 'resource': lambda x, base=VERSA_BASEIRI, **kwargs: I(iri.absolutize(x, base)), VERSA_BASEIRI + 'resourceset': handle_resourceset, } def parse(md, model, encoding='utf-8', config=None): """ Translate the Versa Markdown syntax into Versa model relationships md -- markdown source text model -- Versa model to take the output relationship encoding -- character encoding (defaults to UTF-8) Returns: The overall base URI (`@base`) specified in the Markdown file, or None >>> from versa.driver.memory import newmodel >>> 
from versa.serial.literate import parse >>> m = newmodel() >>> parse(open('test/resource/poetry.md').read(), m) 'http://uche.ogbuji.net/poems/' >>> m.size() 40 >>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15')) (I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {}) """ #Set up configuration to interpret the conventions for the Markdown config = config or {} #This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources syntaxtypemap = {} if config.get('autotype-h1'): syntaxtypemap['h1'] = config.get('autotype-h1') if config.get('autotype-h2'): syntaxtypemap['h2'] = config.get('autotype-h2') if config.get('autotype-h3'): syntaxtypemap['h3'] = config.get('autotype-h3') interp_stanza = config.get('interpretations', {}) interpretations = {} def setup_interpretations(interp): #Map the interpretation IRIs to functions to do the data prep for prop, interp_key in interp.items(): if interp_key.startswith('@'): interp_key = iri.absolutize(interp_key[1:], VERSA_BASEIRI) if interp_key in PREP_METHODS: interpretations[prop] = PREP_METHODS[interp_key] else: #just use the identity, i.e. 
no-op interpretations[prop] = lambda x, **kwargs: x setup_interpretations(interp_stanza) #Prep ID generator, in case needed idg = idgen(None) #Preprocess the Markdown to deal with IRI-valued property values def iri_ref_tool(m): body = m.group(1) lchar = '&lt;' if iri.matches_uri_ref_syntax(body ) else '<' return lchar + m.group(1) + '>' md = IRIREF_CAND_PAT.sub(iri_ref_tool, md) #Parse the Markdown #Alternately: #from xml.sax.saxutils import escape, unescape #h = markdown.markdown(escape(md.decode(encoding)), output_format='html5') #Note: even using safe_mode this should not be presumed safe from tainted input #h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5') comments = mkdcomments.CommentsExtension() h = markdown.markdown(md, safe_mode='escape', output_format='html5', extensions=[comments]) #doc = html.markup_fragment(inputsource.text(h.encode('utf-8'))) tb = treebuilder() h = '<html>' + h + '</html>' root = html5.parse(h) #root = tb.parse(h) #Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest first_h1 = next(select_name(descendants(root), 'h1')) #top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2')) # Extract header elements. Notice I use an empty element with an empty parent as the default result docheader = next(select_value(select_name(descendants(root), 'h1'), '@docheader'), element('empty', parent=root)) # //h1[.="@docheader"] sections = filter(lambda x: x.xml_value != '@docheader', select_name_pattern(descendants(root), HEADER_PAT)) # //h1[not(.="@docheader")]|h2[not(.="@docheader")]|h3[not(.="@docheader")] def fields(sect): ''' Each section represents a resource and contains a list with its properties This generator parses the list and yields the key value pairs representing the properties Some properties have attributes, expressed in markdown as a nested list. 
If present these attributes Are yielded as well, else None is yielded ''' #import logging; logging.debug(repr(sect)) #Pull all the list elements until the next header. This accommodates multiple lists in a section try: sect_body_items = itertools.takewhile(lambda x: HEADER_PAT.match(x.xml_name) is None, select_elements(following_siblings(sect))) except StopIteration: return #results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3') #field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ] field_list = [ li for elem in select_name(sect_body_items, 'ul') for li in select_name(elem, 'li') ] def parse_li(pair): ''' Parse each list item into a property pair ''' if pair.strip(): matched = REL_PAT.match(pair) if not matched: raise ValueError(_('Syntax error in relationship expression: {0}'.format(pair))) if matched.group(3): prop = matched.group(3).strip() if matched.group(4): prop = matched.group(4).strip() if matched.group(7): val = matched.group(7).strip() typeindic = RES_VAL elif matched.group(9): val = matched.group(9).strip() typeindic = TEXT_VAL elif matched.group(11): val = matched.group(11).strip() typeindic = TEXT_VAL elif matched.group(12): val = matched.group(12).strip() typeindic = UNKNOWN_VAL else: val = '' typeindic = UNKNOWN_VAL #prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ] #import logging; logging.debug(repr((prop, val))) return prop, val, typeindic return None, None, None def prep_li(li): ''' Take care of Markdown parsing minutiae. Also, Exclude child uls * a/href embedded in the li means it was specified as <link_text>. Restore the angle brackets as expected by the li parser * Similar for cases where e.g. 
prop: <abc> gets turned into prop: <abc></abc> ''' prepped = '' for ch in itertools.takewhile( lambda x: not (isinstance(x, element) and x.xml_name == 'ul'), li.xml_children ): if isinstance(ch, text): prepped += ch elif isinstance(ch, element): if ch.xml_name == 'a': prepped += '<' + ch.xml_value + '>' else: prepped += '<' + ch.xml_name + '>' return prepped #Go through each list item for li in field_list: #Is there a nested list, which expresses attributes on a property if list(select_name(li, 'ul')): #main = ''.join([ node.xml_value # for node in itertools.takewhile( # lambda x: x.xml_name != 'ul', select_elements(li) # ) # ]) main = prep_li(li) prop, val, typeindic = parse_li(main) subfield_list = [ parse_li(prep_li(sli)) for e in select_name(li, 'ul') for sli in ( select_name(e, 'li') ) ] subfield_list = [ (p, v, t) for (p, v, t) in subfield_list if p is not None ] #Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader if val is None: val = '' yield prop, val, typeindic, subfield_list #Just a regular, unadorned property else: prop, val, typeindic = parse_li(prep_li(li)) if prop: yield prop, val, typeindic, None iris = {} # Gather the document-level metadata from the @docheader section base = schemabase = rtbase = document_iri = default_lang = None for prop, val, typeindic, subfield_list in fields(docheader): #The @iri section is where key IRI prefixes can be set if prop == '@iri': for (k, uri, typeindic) in subfield_list: if k == '@base': base = schemabase = rtbase = uri # @property is legacy elif k == '@schema' or k == '@property': schemabase = uri elif k == '@resource-type': rtbase = uri else: iris[k] = uri #The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship elif prop == '@interpretations': #Iterate over items from the @docheader/@interpretations section to set up for further parsing interp = {} for k, v, x in 
subfield_list: interp[I(iri.absolutize(k, schemabase))] = v setup_interpretations(interp) #Setting an IRI for this very document being parsed elif prop == '@document': document_iri = val elif prop == '@language': default_lang = val #If we have a resource to which to attach them, just attach all other properties elif document_iri or base: rid = document_iri or base fullprop = I(iri.absolutize(prop, schemabase or base)) if fullprop in interpretations: val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model) if val is not None: model.add(rid, fullprop, val) else: model.add(rid, fullprop, val) #Default IRI prefixes if @iri/@base is set
InputDataNotConvertibleExc), (ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc), (ipaddress.ip_address("::1"), InputDataNotConvertibleExc), (ipaddress.ip_network("127.0.0.0/30"), ["127.0.0.0", "127.0.0.1", "127.0.0.2", "127.0.0.3"]), (ipaddress.ip_network("2001:db8::/126"), ["2001:db8::", "2001:db8::1", "2001:db8::2", "2001:db8::3"]), (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), ["https", "www.google.cz", "/test", "", "abc=def", ""]), (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), (IterableObject([1, "", 3]), ["1", "", "3"]), (IterableObject([1, "hello", 3]), ["1", "hello", "3"]), (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=StringBlueprint(), parsing_mode=ParsingMode.MODE_RATIONAL), ( ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), ( {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"}, InputDataTypeInBlocklistExc ), ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], ["2.001", "2.499", "2.5", "2.501", 
"2.999", "0.0", "-0.0"]), ("1234567890", InputDataTypeInBlocklistExc), (b"\x00\x00\x00\x00", InputDataTypeInBlocklistExc), (b"abcdef", InputDataTypeInBlocklistExc), # list(bytes) returns a list of integers (ASCII values)! (bytearray(b"abcdef"), InputDataTypeInBlocklistExc), # list(bytes) returns a list of integers (ASCII values)! (range(5, 15), ["5", "6", "7", "8", "9", "10", "11", "12", "13", "14"]), (sorted((100, 5, 849, 2, -456, 999)), ["-456", "2", "5", "100", "849", "999"]), (sorted("18754522"), ["1", "2", "2", "4", "5", "5", "7", "8"]), (sorted(b"cabfdeee"), ["97", "98", "99", "100", "101", "101", "101", "102"]), (sorted(bytearray(b"cabfdeee")), ["97", "98", "99", "100", "101", "101", "101", "102"]), ((i * i for i in range(10)), ["0", "1", "4", "9", "16", "25", "36", "49", "64", "81"]), (map(lambda x: x + "000", ("1", "2", "3")), ["1000", "2000", "3000"]), (map(lambda x: x ** 2, range(5)), ["0", "1", "4", "9", "16"]), (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), ["123", "789456", "\r\n9\t"]), (IterableObject([]), []), (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), ["-555", "2.999", "True", "\v+123_000\f", "999"]), (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), ignore_order_of_output_list(["-789", "False", "5.5"])), (IterableObject(range(1, 10, 2)), ["1", "3", "5", "7", "9"]), (IterableObject("886644"), ["8", "8", "6", "6", "4", "4"]), (IterableObject(b"abc"), ["97", "98", "99"]), (IterableObject(bytearray(b"abc")), ["97", "98", "99"]), (ExceptionRaisingIterableObject(raise_=False), ["-123"]), ([], []), (tuple(), []), (set(), []), (dict(), InputDataTypeInBlocklistExc), ("", InputDataTypeInBlocklistExc), (b"", InputDataTypeInBlocklistExc), (("abc" for _ in range(0)), []), (("abc" for _ in range(1)), ["abc"]), ((theoretical_testutils.EmptyObject() for _ in range(0)), []), ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc), 
(map(lambda x: str(x) + "t", (1, 2, 3)), ["1t", "2t", "3t"]), (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc), ([789, float("inf"), True], ["789", "inf", "True"]), ([789, float("-inf"), True], ["789", "-inf", "True"]), ([789, float("nan"), True], ["789", "nan", "True"]), ([789, "", True], ["789", "", "True"]), ((789, "", True), ["789", "", "True"]), ({789, "", True}, ignore_order_of_output_list(["789", "", "True"])), ([789, "Hello World!", True], ["789", "Hello World!", "True"]), ((789, "Hello World!", True), ["789", "Hello World!", "True"]), ({789, "Hello World!", True}, ignore_order_of_output_list(["789", "Hello World!", "True"])), ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeInBlocklistExc), ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], ["789", "127.0.0.1", "::1", "True"]), ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc), ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], ["127.0.0.1", "::1"]), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), ("123a456", InputDataTypeInBlocklistExc), ("-123", InputDataTypeInBlocklistExc), ("123_000", InputDataTypeInBlocklistExc), ("hello", InputDataTypeInBlocklistExc), (None, InputDataNotConvertibleExc), (False, InputDataNotConvertibleExc), (True, InputDataNotConvertibleExc), (-123, InputDataNotConvertibleExc), (0, InputDataNotConvertibleExc), (123, InputDataNotConvertibleExc), (-123.5, InputDataNotConvertibleExc), (-0.0, InputDataNotConvertibleExc), (0.0, InputDataNotConvertibleExc), (123.5, InputDataNotConvertibleExc), (float("inf"), InputDataNotConvertibleExc), (float("nan"), InputDataNotConvertibleExc), (int, InputDataNotConvertibleExc), (theoretical_testutils.EmptyObject, InputDataNotConvertibleExc), (datetime.datetime.now(), InputDataNotConvertibleExc), (datetime.datetime.now().date(), InputDataNotConvertibleExc), 
(datetime.datetime.now().time(), InputDataNotConvertibleExc), (ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc), (ipaddress.ip_address("::1"), InputDataNotConvertibleExc), (ipaddress.ip_network("127.0.0.0/30"), ["127.0.0.0", "127.0.0.1", "127.0.0.2", "127.0.0.3"]), (ipaddress.ip_network("2001:db8::/126"), ["2001:db8::", "2001:db8::1", "2001:db8::2", "2001:db8::3"]), (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), ["https", "www.google.cz", "/test", "", "abc=def", ""]), (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), (IterableObject([1, "", 3]), ["1", "", "3"]), (IterableObject([1, "hello", 3]), ["1", "hello", "3"]), (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=StringBlueprint(), parsing_mode=ParsingMode.MODE_STRICT), ( ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), ( {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"}, InputDataTypeNotInAllowlistExc ), ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], 
["2.001", "2.499", "2.5", "2.501", "2.999", "0.0", "-0.0"]), ("1234567890", InputDataTypeNotInAllowlistExc), (b"\x00\x00\x00\x00", InputDataTypeNotInAllowlistExc), (b"abcdef", InputDataTypeNotInAllowlistExc), # list(bytes) returns a list of integers (ASCII values)! (bytearray(b"abcdef"), InputDataTypeNotInAllowlistExc), # list(bytes) returns a list of integers (ASCII values)! (range(5, 15), InputDataTypeNotInAllowlistExc), (sorted((100, 5, 849, 2, -456, 999)), ["-456", "2", "5", "100", "849", "999"]), (sorted("18754522"), ["1", "2", "2", "4", "5", "5", "7", "8"]), (sorted(b"cabfdeee"), ["97", "98", "99", "100", "101", "101", "101", "102"]), (sorted(bytearray(b"cabfdeee")), ["97", "98", "99", "100", "101", "101", "101", "102"]), ((i * i for i in range(10)), InputDataTypeNotInAllowlistExc), (map(lambda x: x + "000", ("1", "2", "3")), InputDataTypeNotInAllowlistExc), (map(lambda x: x ** 2, range(5)), InputDataTypeNotInAllowlistExc), (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), InputDataTypeNotInAllowlistExc), (IterableObject([]), InputDataTypeNotInAllowlistExc), (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), InputDataTypeNotInAllowlistExc), (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), InputDataTypeNotInAllowlistExc), (IterableObject(range(1, 10, 2)), InputDataTypeNotInAllowlistExc), (IterableObject("886644"), InputDataTypeNotInAllowlistExc), (IterableObject(b"abc"), InputDataTypeNotInAllowlistExc), (IterableObject(bytearray(b"abc")), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=False), InputDataTypeNotInAllowlistExc), ([], []), (tuple(), []), (set(), []), (dict(), InputDataTypeNotInAllowlistExc), ("", InputDataTypeNotInAllowlistExc), (b"", InputDataTypeNotInAllowlistExc), (("abc" for _ in range(0)), InputDataTypeNotInAllowlistExc), (("abc" for _ in range(1)), InputDataTypeNotInAllowlistExc), ((theoretical_testutils.EmptyObject() for _ in 
range(0)), InputDataTypeNotInAllowlistExc), ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc), (map(lambda x: str(x) + "t", (1, 2, 3)), InputDataTypeNotInAllowlistExc), (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc), ([789, float("inf"), True], ["789", "inf", "True"]), ([789, float("-inf"), True], ["789", "-inf", "True"]), ([789, float("nan"), True], ["789", "nan", "True"]), ([789, "", True], ["789", "", "True"]), ((789, "", True), ["789", "", "True"]), ({789, "", True}, ignore_order_of_output_list(["789", "", "True"])), ([789, "Hello World!", True], ["789", "Hello World!", "True"]), ((789, "Hello World!", True), ["789", "Hello World!", "True"]), ({789, "Hello World!", True}, ignore_order_of_output_list(["789", "Hello World!", "True"])), ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeNotInAllowlistExc), ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], ["789", "127.0.0.1", "::1", "True"]), ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc), ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], ["127.0.0.1", "::1"]), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), ("123a456", InputDataTypeNotInAllowlistExc), ("-123", InputDataTypeNotInAllowlistExc), ("123_000", InputDataTypeNotInAllowlistExc), ("hello", InputDataTypeNotInAllowlistExc), (None, InputDataTypeNotInAllowlistExc), (False, InputDataTypeNotInAllowlistExc), (True, InputDataTypeNotInAllowlistExc), (-123, InputDataTypeNotInAllowlistExc), (0, InputDataTypeNotInAllowlistExc), (123, InputDataTypeNotInAllowlistExc), (-123.5, InputDataTypeNotInAllowlistExc), (-0.0, InputDataTypeNotInAllowlistExc), (0.0, InputDataTypeNotInAllowlistExc), (123.5, InputDataTypeNotInAllowlistExc), (float("inf"), InputDataTypeNotInAllowlistExc), (float("nan"), InputDataTypeNotInAllowlistExc), (int, 
InputDataTypeNotInAllowlistExc), (theoretical_testutils.EmptyObject, InputDataTypeNotInAllowlistExc), (datetime.datetime.now(), InputDataTypeNotInAllowlistExc), (datetime.datetime.now().date(), InputDataTypeNotInAllowlistExc), (datetime.datetime.now().time(), InputDataTypeNotInAllowlistExc), (ipaddress.ip_address("127.0.0.1"), InputDataTypeNotInAllowlistExc), (ipaddress.ip_address("::1"), InputDataTypeNotInAllowlistExc), (ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc), (ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc), (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), ["https", "www.google.cz", "/test", "", "abc=def", ""]), # ParseResult is a subclass of tuple!!! (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataTypeNotInAllowlistExc), (theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc), (IterableObject([1, "", 3]), InputDataTypeNotInAllowlistExc), (IterableObject([1, "hello", 3]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=True), InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), filters=(ListDeduplicateItemsFilter(),)), ( (["1", 2, 3.1], [1, 2, 3]), (range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (["1", True, 2.9, "\r\n2\t", "\v\f 3 ", 3], [1, 2, 3]), ((float(i % 2) for i in range(20)), [0, 1]), ([1, 2, 2, 2, 3, 3, 4], [1, 2, 3, 4]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), filters=(ListSortFilter(None, reverse_order=False),)), ( ([], []), ([123], [123]), ([100, True, -100, "\r\n000_3 ", 0, 2.999, 4, "6", 5], [-100, 0, 1, 2, 3, 4, 5, 6, 100]), (range(10, 0, -1), [1, 2, 3, 4, 5, 6, 
7, 8, 9, 10]), ([1, 1, 2, 1, 3, 5, 4, 4, 5, 2, 3], [1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]), ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]), ((str(i) for i in range(10)), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), filters=(ListSortFilter(None, reverse_order=True),)), ( ([], []), ([123], [123]), ([100, True, -100, "\r\n000_3 ", 0, 2.999, 4, "6", 5], [100, 6, 5, 4, 3, 2, 1, 0, -100]), (range(10, 0, -1), [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), ([1, 1, 2, 1, 3, 5, 4, 4, 5, 2, 3], [5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 1]), ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]), ((str(i) for i in range(10)), [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(lambda item: item.get_id(), reverse_order=False),)), ( ([], []), ( [CustomTestListItem(3, "a"), CustomTestListItem(1, "c"),
# NOTE(review): the repository-metadata tag below is an export artifact and is
# not valid Python; it is commented out here so the module can actually be
# imported. Original text: <reponame>pjh/vm-analyze <gh_stars>1-10

# Virtual memory analysis scripts.
# Developed 2012-2014 by <NAME>, <EMAIL>
# Copyright (c) 2012-2014 <NAME> and University of Washington

from util.pjh_utils import *
import conf.system_conf as sysconf
import datetime
import os
import shlex
import shutil
import signal
import subprocess
import sys
import time

# Linux "perf" tool event selection.
#
# On verbena, there are just two "generic" hw performance counters, so when
# more than two events are selected here, the events are multiplexed over the
# counters during the "perf record" run. So, for example, if four events are
# active here, they will each only be counted for about 50% of the record's
# execution.
#
# To see the full list of available events, run "perf list".
PERF_EVENTS_WALKCYCLES = [
    'r408',      # stjohns: DTLB_LOAD_MISSES.WALK_CYCLES
    'r449',      # stjohns: DTLB_MISSES.WALK_CYCLES
    'cycles',
    #'r108',     # stjohns: DTLB_LOAD_MISSES.ANY: same as dTLB-load-misses
    #'r149',     # stjohns: DTLB_MISSES.ANY: slightly greater than
                 #   dTLB-load-misses plus dTLB-store-misses... not quite sure
                 #   what the difference is.
]

# Human-readable labels for the raw event codes above (presumably used when
# labeling report/plot output -- TODO confirm against callers).
PERF_EVENTS_TO_STR = {
    'r108': 'dTLB-load-misses',
    'r149': 'dTLB-misses',
    'r408': 'dTLB-load-walkcycles',
    'r449': 'DTLB walk cycles',
}

# Older, symbolic event set; kept for reference (most entries disabled).
PERF_EVENTS_OLD = [
    'dTLB-loads',
    'dTLB-load-misses',
    'dTLB-stores',
    'dTLB-store-misses',
    #'dTLB-prefetches',
    #'dTLB-prefetch-misses',
    # 'iTLB-loads',
    # 'iTLB-load-misses',
    #'L1-dcache-loads',
    #'L1-dcache-load-misses',
    #'L1-dcache-stores',
    #'L1-dcache-store-misses',
    #'L1-dcache-prefetches',
    #'L1-dcache-prefetch-misses',
    #'L1-icache-loads',
    #'L1-icache-load-misses',
    #'L1-icache-prefetches',
    #'L1-icache-prefetch-misses',
    #'LLC-loads',
    #'LLC-load-misses',
    #'LLC-stores',
    #'LLC-store-misses',
    #'LLC-prefetches',
    #'LLC-prefetch-misses',
]

# The event set that will actually be passed to "perf record":
#PERF_EVENTS = PERF_EVENTS_OLD
PERF_EVENTS = PERF_EVENTS_WALKCYCLES

# By using the -F flag to perf record, the number of events counted before
# generating a sample interrupt will be dynamically tuned to sample
# approximately F times every second (so higher PERF_FREQ == more samples with
# smaller periods). For example, set to 1000 to get 1000 samples/s, or about
# 1 sample every millisecond (going lower than 1 ms is probably a poor idea).
# I verified that this is right - for a small graph500 run, -F 100 captures
# 1473 dTLB-load samples, whereas -F 1000 captures about 10x as many, 14575
# samples.
# I examined the plot output for Graph500, and using -F 1000 results in a
# less-jagged plot than using -F 100, with no other apparent side effects
# other than 10x more samples. The general calculated miss rates look pretty
# much the same, so a less-jagged plot seems to indicate a more-accurate plot.
# I'll stick with -F 1000 for now, unless it seems to cause sample periods to
# become too small, which could potentially be problematic...
PERF_FREQ = 1000
# perf binary location and names of its output files:
PERF_CMD = "{}/perf".format(sysconf.PERF_DIR)
PERF_EVENTS_STR = ','.join(PERF_EVENTS)
# "perf record" options:
#   -a and no particular command: perf record will just run until it gets
#     ctrl-c (SIGINT).
#   -i: child tasks don't inherit counters. Not sure if this applies to hw
#     events anyway, but do it to be sure; parent/child accounting is handled
#     by the analysis scripts themselves.
#   -F: sampling frequency (see PERF_FREQ above); -e: event list.
PERF_RECORD_OPTS = "record -vvv -a -i -F {} -e {}".format(
    PERF_FREQ, PERF_EVENTS_STR)
PERF_STDOUT = 'perf.stdout'
PERF_STDERR = 'perf.stderr'
PERF_DATA = 'perf.data'
PERF_DUMPFILE = 'perf.dump'
PERF_REPORTFILE = 'perf.report'
PERF_TRACE_DEFAULT_ON = False

# Tracing directories and parameters: 1 for true / enable, 0 for false /
# disable.
# Comments:
#   - With sched_switch tracing enabled, the trace buffer will fill up very
#     quickly - even with 678 MB of buffer per-CPU, a full Cassandra
#     start+load+run could not complete.
#   - Does enabling userstack_syms also make the trace buffer fill up more
#     quickly, or not?
#   - clock: use 'local' for a nanosecond-granularity clock, which should
#     approximately sync up with events in a 'perf record' trace. Never use
#     'global' or your system will become unresponsive! 'x86-tsc' works, but
#     doesn't seem to be too helpful at the moment.
tracing_dir = "{}/tracing".format(sysconf.sys_debug_dir)
#trace_buf_mb_per_core = 512   # per-CPU!
trace_buf_mb_per_core = int(sysconf.suggested_tracebuf_size_kb / 1024)   # per-CPU!
#trace_buf_mb_per_core = 10
trace_clock = 'local'
trace_userstacks = 0
# tracing/options/sym-userobj: attempt to look up stack fns. Note that this
# flag may not work if other tracing/options/ fields (besides userstacktrace)
# are enabled.
trace_userstack_syms = 0
trace_vma_events = 1
trace_pte_events = 0
trace_rss_events = 1
# Note: depending on how the kernel is built, sched_switch tracing may cause
# deadlock during userstacktrace collection??
trace_sched_switch = 0
trace_sched_fork = 0
trace_sys_mprotect = 0

# Helper program that briefly spawns a thread on every hardware thread; used
# by trace_on() to make sure every per-CPU trace buffer is activated.
all_cpus_prog = "{}/test-programs/all_cpus {}".format(
    sysconf.apps_dir, sysconf.num_hw_threads)
tracefilename = 'trace-events-full'

''' ... '''

class traceinfo:
    # Tracks the state of one kernel-tracing (+ optional perf-sampling)
    # session for a single application.
    tag = 'traceinfo'

    # Members (all re-initialized per-instance in __init__):
    appname = None
    tracing_on = None            # is the kernel trace currently active?
    trace_outputdir = None
    trace_on_perf_too = None     # was perf turned on via trace_on()?
    perf_outputdir = None
    perf_tracing_on = None       # is perf trace process active?
    perf_p = None                # Popen object for perf process
    perf_stdout = None
    perf_stderr = None
    pdata_fname = None
    trace_on_time = None
    perf_on_time = None

    def __init__(self, appname):
        # appname: non-empty name of the traced application (used to label
        # output); the process exits via print_error_exit if it is missing.
        tag = "{}.__init__".format(self.tag)
        if not appname:
            print_error_exit(tag, ("missing argument: appname={}").format(
                appname))
        self.appname = appname
        self.tracing_on = False
        self.trace_outputdir = None
        self.perf_outputdir = None
        self.perf_tracing_on = False
        self.trace_on_perf_too = False
        self.perf_p = None
        self.perf_stdout = None
        self.perf_stderr = None
        self.pdata_fname = None
        self.trace_on_time = None
        self.perf_on_time = None
        return

    # This method performs the following steps:
    #   - Set the kernel tracing options and save them in the outputdir
    #   - Turn on kernel tracing
    #   - Run a small program to ensure that all CPU tracing buffers are active
    # Returns: True on success, False on error.
    def trace_on(self, outputdir, descr, use_perf=PERF_TRACE_DEFAULT_ON,
            targetpid=None):
        # Turn on kernel tracing (and optionally perf hw-event sampling).
        # Arguments:
        #   outputdir: directory for trace config / perf output (created if
        #     missing).
        #   descr: NOTE(review) -- not used anywhere in this method;
        #     presumably a human-readable description. TODO confirm intent.
        #   use_perf: also start "perf record" sampling via self.perf_on().
        #   targetpid: if set, snapshot /proc/<pid>/{maps,smaps} into
        #     outputdir (errors ignored, e.g. if the process already died).
        # Returns: True on success, False on error.
        tag = "{}.trace_on".format(self.tag)

        if self.tracing_on:
            print_error(tag, ("tracing is already activated!"))
            return False
        success = True
        tdir = tracing_dir

        if not os.path.exists(outputdir):
            os.makedirs(outputdir)

        # Guard against a known-deadlock configuration: collecting userstack
        # entries on the 'do_page_fault' code path that itself contains
        # pte/rss trace events can recursively fault and hang the system.
        if (trace_userstacks != 0 and
                (trace_pte_events != 0 or trace_rss_events != 0)):
            print_error_exit(tag, ("can't set both trace_userstacks={} "
                "and trace_pte_events={} or trace_rss_events={} - "
                "otherwise, when collecting "
                "userstack entries for 'do_page_fault' code path that "
                "contains pte trace events, you may invoke further "
                "page faults, causing recursive trace events or "
                "whatever and leading to deadlock!").format(
                trace_userstacks, trace_pte_events, trace_rss_events))

        # Set kernel tracing options. Each entry is a complete shell command
        # that will be executed under sudo below:
        options = []
        options.append(("echo 0 > {}/tracing_on").format(tdir))
        options.append(("echo {} > {}/buffer_size_kb").format(
            int(trace_buf_mb_per_core*1024), tdir))
        options.append(("echo {} > {}/trace_clock").format(trace_clock, tdir))
        options.append(("echo 0 > {}/options/overwrite").format(tdir))
        options.append(("echo {} > {}/options/sym-userobj").format(
            trace_userstack_syms, tdir))
        options.append(("echo {} > {}/options/userstacktrace").format(
            trace_userstacks, tdir))
        options.append(("echo {} > {}/events/mmap/enable").format(
            trace_vma_events, tdir))
        options.append(("echo {} > {}/events/pte/enable").format(
            trace_pte_events, tdir))
        options.append(("echo {} > {}/events/rss/enable").format(
            trace_rss_events, tdir))
        options.append(("echo {} > {}/events/sched/sched_switch/"
            "enable").format(trace_sched_switch, tdir))
        options.append(
            ("echo {} > {}/events/sched/sched_process_fork/enable").format(
            trace_sched_fork, tdir))
        options.append(
            ("echo {} > {}/events/syscalls/sys_enter_mprotect/enable").format(
            trace_sys_mprotect, tdir))
        options.append(
            ("echo {} > {}/events/syscalls/sys_exit_mprotect/enable").format(
            trace_sys_mprotect, tdir))
        options.append(("echo > {}/trace").format(tdir))   # reset trace
        # If we use the same traceinfo for multiple trace-on / trace-off
        # cycles and the outputdir is the same (e.g. for a manualapp...),
        # just overwrite this file:
        write_conf_file(options, "{}/kernel-trace-options".format(outputdir),
            overwrite=True)

        # Prepend sudo to every command: kernel tracing requires root.
        # It turns out that for these sudo commands, shell=True is NOT
        # needed, even when the command redirects its stdout/stderr to a
        # file, as long as the args are split using shlex: shlex puts the
        # entire 'command' into one arg which is passed to the new root
        # shell, so the redirection is encapsulated in that argument.
        # (shell=True IS required when executing a non-sudo command that uses
        # redirection directly; an alternative in that case is to set the
        # stdout= and stderr= arguments instead.)
        for option in options:
            cmdline = "sudo bash -c '{}'".format(option)
            args = shlex.split(cmdline)
            retcode = subprocess.call(args)
            if retcode != 0:
                print_error(tag, ("command \"{}\" returned non-zero code "
                    "{}").format(cmdline, retcode))
                return False

        self.trace_on_time = time.perf_counter()
          # Requires Python 3.3!
          # http://docs.python.org/3/library/time.html#time.perf_counter

        # Ok, activate the kernel trace:
        cmdline = "sudo bash -c 'echo 1 > {}/tracing_on'".format(tdir)
        args = shlex.split(cmdline)
        print_debug(tag, "args={}".format(args))
        retcode = subprocess.call(args)
        if retcode != 0:
            print_error(tag, ("command \"{}\" returned non-zero code "
                "{}").format(cmdline, retcode))
            return False
        self.tracing_on = True

        # all_cpus is a program used to spawn a thread on every CPU in the
        # system, so that the kernel tracing subsystem kicks in - otherwise,
        # you may see "CPU 1 buffer started" messages in the trace output
        # after trace events that you want to see have already passed! I have
        # yet to find a better way to accomplish this...
        null = get_dev_null()
        print_debug(tag, ("calling all_cpus: {}").format(all_cpus_prog))
        args = shlex.split(all_cpus_prog)
        retcode = subprocess.call(args, stdout=null, stderr=null)
        if retcode != 0:
            print_error(tag, ("command {} returned non-zero code "
                "{}").format(all_cpus_prog, retcode))
            success = False
        null.close()
        self.trace_outputdir = outputdir

        # Turn on hardware event sampling using perf? If so, use the same
        # outputdir as for the kernel trace:
        if use_perf:
            self.trace_on_perf_too = True
            perfsuccess = self.perf_on(self.trace_outputdir)
            success = success and perfsuccess

        if targetpid:
            # Ignore errors (e.g. if the process has already died, then the
            # copy will fail...)
            copy_proc_file(targetpid, 'maps', ("{}/maps.{}").format(
                self.trace_outputdir, 'trace_on'))
            copy_proc_file(targetpid, 'smaps', ("{}/smaps.{}").format(
                self.trace_outputdir, 'trace_on'))

        return success

    # This method takes a trace checkpoint, which just consists of:
    #   - Echoing a string to the kernel trace_marker
    #   - Saving the current process tree to a file in the outputdir that was
    #     passed to trace_on().
    # For best results, the description argument should be short and free of
    # unusual characters - it will eventually end up as a filename prefix
    # when the analysis scripts are run.
    # Returns: 'success' on normal operation, 'full' if trace checkpoint
    # failed because trace buffer filled up, 'error' if some other error
    # occurred.
    # NOTE(review): this chunk of the file is truncated here -- the method
    # body continues past the end of the visible source.
    def trace_checkpoint(self, descr, targetpid=None):
        tag = "{}.trace_checkpoint".format(self.tag)
        if not self.tracing_on:
            print_error(tag, "tracing is not activated!")
            return
for required, given in zip(pdb_required, pdb_list): assert required == given, f"{required} was not created" logger.info(f"All required PDBs created: {pdb_required}") def get_osd_utilization(): """ Get osd utilization value Returns: osd_filled (dict): Dict of osd name and its used value i.e {'osd.1': 15.276289408185841, 'osd.0': 15.276289408185841, 'osd.2': 15.276289408185841} """ osd_filled = {} ceph_cmd = "ceph osd df" ct_pod = pod.get_ceph_tools_pod() output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd) for osd in output.get("nodes"): osd_filled[osd["name"]] = osd["utilization"] return osd_filled def get_ceph_df_detail(): """ Get ceph osd df detail Returns: dict: 'ceph df details' command output """ ceph_cmd = "ceph df detail" ct_pod = pod.get_ceph_tools_pod() return ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd) def validate_replica_data(pool_name, replica): """ Check if data is replica 2 or 3 Args: replica (int): size of the replica(2,3) pool_name (str): name of the pool to check replica Returns: Bool: True if replicated data size is meet rep config and False if dont """ ceph_df_detail_output = get_ceph_df_detail() pool_list = ceph_df_detail_output.get("pools") for pool in pool_list: if pool.get("name") == pool_name: logger.info(f"{pool_name}") stored = pool["stats"]["stored"] byte_used = pool["stats"]["bytes_used"] compress_bytes_used = pool["stats"]["compress_bytes_used"] compress_under_bytes = pool["stats"]["compress_under_bytes"] byte_used = byte_used + compress_under_bytes - compress_bytes_used store_ratio = byte_used / stored if (replica + 0.2) > store_ratio > (replica - 0.2): logger.info(f"pool {pool_name} meet rep {replica} size") return True else: logger.info( f"pool {pool_name} meet do not meet rep {replica}" f" size Store ratio is {store_ratio}" ) return False raise PoolNotFound(f"Pool {pool_name} not found on cluster") def validate_compression(pool_name): """ Check if data was compressed Args: pool_name (str): name of the pool to check replica Returns: bool: 
True if compression works. False if not """ ceph_df_detail_output = get_ceph_df_detail() pool_list = ceph_df_detail_output.get("pools") for pool in pool_list: if pool.get("name") == pool_name: logger.info(f"{pool_name}") byte_used = pool["stats"]["bytes_used"] compress_bytes_used = pool["stats"]["compress_bytes_used"] compress_under_bytes = pool["stats"]["compress_under_bytes"] all_byte_used = byte_used + compress_under_bytes - compress_bytes_used compression_ratio = byte_used / all_byte_used logger.info(f"this is the comp_ratio {compression_ratio}") if 0.6 < compression_ratio: logger.info( f"Compression ratio {compression_ratio} is " f"larger than 0.6" ) return True else: logger.info( f"Compression ratio {compression_ratio} is " f"smaller than 0.6" ) return False raise PoolNotFound(f"Pool {pool_name} not found on cluster") def validate_osd_utilization(osd_used=80): """ Validates osd utilization matches osd_used value Args: osd_used (int): osd used value Returns: bool: True if all osd values is equal or greater to osd_used. False Otherwise. 
""" _rc = True osd_filled = get_osd_utilization() for osd, value in osd_filled.items(): if int(value) >= osd_used: logger.info(f"{osd} used value {value}") else: _rc = False logger.warning(f"{osd} used value {value}") return _rc def get_pgs_per_osd(): """ Function to get ceph pg count per OSD Returns: osd_dict (dict): Dict of osd name and its used value i.e {'osd.0': 136, 'osd.2': 136, 'osd.1': 136} """ osd_dict = {} ceph_cmd = "ceph osd df" ct_pod = pod.get_ceph_tools_pod() output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd) for osd in output.get("nodes"): osd_dict[osd["name"]] = osd["pgs"] return osd_dict def get_balancer_eval(): """ Function to get ceph pg balancer eval value Returns: eval_out (float): Eval output of pg balancer """ ceph_cmd = "ceph balancer eval" ct_pod = pod.get_ceph_tools_pod() eval_out = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd).split(" ") return float(eval_out[3]) def get_pg_balancer_status(): """ Function to check pg_balancer active and mode is upmap Returns: bool: True if active and upmap is set else False """ # Check either PG balancer is active or not ceph_cmd = "ceph balancer status" ct_pod = pod.get_ceph_tools_pod() output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd) # Check 'mode' is 'upmap', based on suggestion from Ceph QE # TODO: Revisit this if mode needs change. 
if output["active"] and output["mode"] == "upmap": logging.info("PG balancer is active and mode is upmap") return True else: logging.error("PG balancer is not active") return False def validate_pg_balancer(): """ Validate either data is equally distributed to OSDs Returns: bool: True if avg PG's per osd difference is <=10 else False """ # Check OSD utilization either pg balancer is active # TODO: Revisit this if pg difference value needs change # TODO: Revisit eval value if pg balancer mode changes from 'upmap' if get_pg_balancer_status(): eval = get_balancer_eval() osd_dict = get_pgs_per_osd() osd_avg_pg_value = round(sum(osd_dict.values()) / len(osd_dict)) osd_pg_value_flag = True for key, value in osd_dict.items(): diff = abs(value - osd_avg_pg_value) if diff <= 10: logging.info(f"{key} PG difference {diff} is acceptable") else: logging.error(f"{key} PG difference {diff} is not acceptable") osd_pg_value_flag = False if osd_pg_value_flag and eval <= 0.025: logging.info( f"Eval value is {eval} and pg distribution " f"average difference is <=10 which is acceptable" ) return True else: logging.error( f"Eval value is {eval} and pg distribution " f"average difference is >=10 which is high and not acceptable" ) return False else: logging.info("pg_balancer is not active") def get_percent_used_capacity(): """ Function to calculate the percentage of used capacity in a cluster Returns: float: The percentage of the used capacity in the cluster """ ct_pod = pod.get_ceph_tools_pod() output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph df") total_used = output.get("stats").get("total_used_raw_bytes") total_avail = output.get("stats").get("total_bytes") return 100.0 * total_used / total_avail def get_osd_pods_memory_sum(): """ Get the sum of memory of all OSD pods. 
This is used to determine the size needed for a PVC so when IO will be running over it the OSDs cache will be filled Returns: int: The sum of the OSD pods memory in GB """ osd_pods = pod.get_osd_pods() num_of_osd_pods = len(osd_pods) osd_pod_mem_size_str = osd_pods[0].get_memory().get("osd") osd_pod_mem_size = convert_device_size( unformatted_size=osd_pod_mem_size_str, units_to_covert_to="GB" ) return num_of_osd_pods * osd_pod_mem_size def get_child_nodes_osd_tree(node_id, osd_tree): """ This function finds the children of a node from the 'ceph osd tree' and returns them as list Args: node_id (int): the id of the node for which the children to be retrieved osd_tree (dict): dictionary containing the output of 'ceph osd tree' Returns: list: of 'children' of a given node_id """ for i in range(len(osd_tree["nodes"])): if osd_tree["nodes"][i]["id"] == node_id: return osd_tree["nodes"][i]["children"] def check_osds_in_hosts_osd_tree(hosts, osd_tree): """ Checks if osds are formed correctly after cluster expansion Args: hosts (list) : List of hosts osd_tree (str) : 'ceph osd tree' command output Returns: bool : True if osd tree formatted correctly """ for each_host in hosts: osd_in_each_host = get_child_nodes_osd_tree(each_host, osd_tree) if len(osd_in_each_host) > 1 or len(osd_in_each_host) <= 0: logger.error( "Error. ceph osd tree is NOT formed correctly after cluster expansion" ) return False logger.info("osd tree verification Passed") return True def check_osd_tree_1az_vmware(osd_tree, number_of_osds): """ Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for deployment and cluster expansion tests. This function is specifically for ocs cluster created on 1 AZ VMWare setup Args: osd_tree (dict): Dictionary of the values which represent 'osd tree'. number_of_osds (int): total number of osds in the cluster Returns: bool: True, if the ceph osd tree is formed correctly. 
Else False """ # in case of vmware, there will be only one zone as of now. The OSDs are arranged as follows: # ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF # -1 0.99326 root default # -8 0.33109 rack rack0 # -7 0.33109 host ocs-deviceset-0-0-dktqc # 1 hdd 0.33109 osd.1 up 1.00000 1.00000 # There will be 3 racks - rack0, rack1, rack2. # When cluster expansion is successfully done, a host and an osd are added in each rack. # The number of hosts will be equal to the number osds the cluster has. Each rack can # have multiple hosts but each host will have only one osd under it. number_of_hosts_expected = int(number_of_osds / 3) all_hosts = [] racks = osd_tree["nodes"][0]["children"] for rack in racks: hosts = get_child_nodes_osd_tree(rack, osd_tree) if len(hosts) != number_of_hosts_expected: logging.error( f"Number of hosts under rack {rack} " f"is not matching the expected ={number_of_hosts_expected} " ) return False else: all_hosts.append(hosts) all_hosts_flatten = [item for sublist in all_hosts for item in sublist] return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree) def check_osd_tree_3az_aws(osd_tree, number_of_osds): """ Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for deployment and cluster expansion tests. This function is specifically for ocs cluster created on 3 AZ AWS config Args: osd_tree (dict): Dictionary of the values which represent 'osd tree'. number_of_osds (int): total number of osds in the cluster Returns: Boolean: True, if the ceph osd tree is formed correctly. Else False """ all_hosts = [] region = osd_tree["nodes"][0]["children"] zones = get_child_nodes_osd_tree(region[0], osd_tree) for
self.score_type == 'rmse': score_val = rmse_folds else: score_val = norm_rmse_folds except Exception as e: print("Exception occurred while building Prophet model...") print(e) print(' FB Prophet may not be installed or Model is not running...') self.ml_dict[name]['model'] = model self.ml_dict[name]['forecast'] = forecast_df_folds self.ml_dict[name][self.score_type] = score_val self.ml_dict[name]['model_build'] = model_build # if self.__any_contained_in_list(what_list=['ARIMA', 'stats', 'best'], in_list=self.model_type): # ################### Let's build an ARIMA Model and add results ################# # print("\n") # print("="*50) # print("Building ARIMA Model") # print("="*50) # print("\n") # name = 'ARIMA' # # Placeholder for cases when model can not be built # score_val = np.inf # model_build = None # model = None # forecasts = None # print(colorful.BOLD + '\nRunning Non Seasonal ARIMA Model...' + colorful.END) # try: # model_build = BuildArima( # stats_scoring, p_max, d_max, q_max, # forecast_period=self.forecast_period, method='mle', verbose=self.verbose # ) # model, forecasts, rmse, norm_rmse = model_build.fit( # ts_df[target] # ) # if self.score_type == 'rmse': # score_val = rmse # else: # score_val = norm_rmse # except Exception as e: # print("Exception occurred while building ARIMA model...") # print(e) # print(' ARIMA model error: predictions not available.') # self.ml_dict[name]['model'] = model # self.ml_dict[name]['forecast'] = forecasts # self.ml_dict[name][self.score_type] = score_val # self.ml_dict[name]['model_build'] = model_build if self.__any_contained_in_list(what_list=['ARIMA','arima','auto_arima','auto_SARIMAX', 'stats', 'best'], in_list=self.model_type): ############# Let's build a SARIMAX Model and get results ######################## print("\n") print("="*50) print("Building Auto SARIMAX Model") print("="*50) print("\n") name = 'auto_SARIMAX' # Placeholder for cases when model can not be built score_val = np.inf model_build = None model = 
None forecast_df_folds = None print(colorful.BOLD + '\nRunning Auto SARIMAX Model...' + colorful.END) try: model_build = BuildAutoSarimax( scoring=stats_scoring, seasonality=self.seasonality, seasonal_period=self.seasonal_period, p_max=p_max, d_max=d_max, q_max=q_max, forecast_period=self.forecast_period, verbose=self.verbose ) model, forecast_df_folds, rmse_folds, norm_rmse_folds = model_build.fit( ts_df=ts_df[[target]+preds], target_col=target, cv = cv ) if self.score_type == 'rmse': score_val = rmse_folds else: score_val = norm_rmse_folds except Exception as e: print("Exception occurred while building Auto SARIMAX model...") print(e) print(' Auto SARIMAX model error: predictions not available.') self.ml_dict[name]['model'] = model self.ml_dict[name]['forecast'] = forecast_df_folds self.ml_dict[name][self.score_type] = score_val self.ml_dict[name]['model_build'] = model_build if self.__any_contained_in_list(what_list=['var','Var','VAR', 'stats', 'best'], in_list=self.model_type): ########### Let's build a VAR Model - but first we have to shift the predictor vars #### if ts_df.shape[0] > 1000 and self.__any_contained_in_list(what_list=['stats', 'best'], in_list=self.model_type): print(colorful.BOLD + '\n===============================================' + colorful.END) print("Skipping VAR Model since dataset is > 1000 rows and it will take too long") print(colorful.BOLD + '===============================================' + colorful.END) else: print("\n") print("="*50) print("Building VAR Model - best suited for small datasets < 1000 rows and < 10 columns") print("="*50) print("\n") name = 'VAR' # Placeholder for cases when model can not be built score_val = np.inf model_build = None model = None forecasts = None if len(preds) == 0: print(colorful.BOLD + '\nNo VAR model created since no explanatory variables given in data set' + colorful.END) else: try: print(colorful.BOLD + '\nRunning VAR Model...' 
+ colorful.END) print(' Shifting %d predictors by 1 to align prior predictor values with current target values...' %len(preds)) # TODO: This causes an issue later in ML (most likely cause of https://github.com/AutoViML/Auto_TS/issues/15) # Since we are passing ts_df there. Make sure you don't assign it # back to the same variable. Make a copy and make changes to that copy. ts_df_shifted = ts_df.copy(deep=True) ts_df_shifted[preds] = ts_df_shifted[preds].shift(1) ts_df_shifted.dropna(axis=0,inplace=True) model_build = BuildVAR(scoring=stats_scoring, forecast_period=self.forecast_period, p_max=p_max, q_max=q_max) model, forecasts, rmse, norm_rmse = model_build.fit( ts_df_shifted[[target]+preds], target_col=target, cv = cv ) if self.score_type == 'rmse': score_val = rmse else: score_val = norm_rmse except Exception as e: print("Exception occurred while building VAR model...") print(e) warnings.warn(' VAR model error: predictions not available.') self.ml_dict[name]['model'] = model self.ml_dict[name]['forecast'] = forecasts self.ml_dict[name][self.score_type] = score_val self.ml_dict[name]['model_build'] = model_build if self.__any_contained_in_list(what_list=['ml', 'ML','best'], in_list=self.model_type): ########## Let's build a Machine Learning Model now with Time Series Data ################ print("\n") print("="*50) print("Building ML Model") print("="*50) print("\n") name = 'ML' # Placeholder for cases when model can not be built score_val = np.inf model_build = None model = None forecasts = None if len(preds) == 0: print(colorful.BOLD + f'\nCreating lag={self.seasonal_period} variable using target for Machine Learning model...' + colorful.END) ### Set the lag to be 1 since we don't need too many lagged variables for univariate case self.lags = self.seasonal_period lag = self.seasonal_period else: print(colorful.BOLD + '\nRunning Machine Learning Models...' 
+ colorful.END) #### Do not create excess lagged variables for ML model ########## if lag <= 4: lag = 4 ### set the minimum lags to be at least 4 for ML models elif lag >= 10: lag = 10 ### set the maximum lags to be not more than 10 for ML models print(' Shifting %d predictors by lag=%d to align prior predictor with current target...' % (len(preds), lag)) ####### Now make sure that there is only as few lags as needed ###### model_build = BuildML( scoring=self.score_type, forecast_period = self.forecast_period, ts_column = self.ts_column, verbose=self.verbose) try: # best = model_build.fit(ts_df=ts_df, target_col=target, lags=lag) model, forecasts, rmse, norm_rmse = model_build.fit( ts_df=ts_df, target_col=target, ts_column = self.ts_column, cv = cv, lags=lag ) if self.score_type == 'rmse': score_val = rmse else: score_val = norm_rmse # bestmodel = best[0] # #### Plotting actual vs predicted for ML Model ################# # # TODO: Move inside the Build Class # plt.figure(figsize=(5, 5)) # plt.scatter(train.append(test)[target].values, # np.r_[bestmodel.predict(train[preds]), bestmodel.predict(test[preds])]) # plt.xlabel('Actual') # plt.ylabel('Predicted') # plt.show(block=False) # ############ Draw a plot of the Time Series data ###### # time_series_plot(dfxs[target], chart_time=self.time_interval) except Exception as e: print("Exception occurred while building ML model...") print(e) print(' For ML model, evaluation score is not available.') self.ml_dict[name]['model'] = model self.ml_dict[name]['forecast'] = forecasts self.ml_dict[name][self.score_type] = score_val self.ml_dict[name]['model_build'] = model_build if not self.__all_contained_in_list(what_list=self.model_type, in_list=self.allowed_models): print(f'The model_type should be any of the following: {self.allowed_models}. You entered {self.model_type}. 
Some models may not have been developed...')

        if len(list(self.ml_dict.keys())) == 0:
            return None

        ######## Selecting the best model based on the lowest rmse score ######
        best_model_name = self.get_best_model_name()
        print(colorful.BOLD + '\nBest Model is: ' + colorful.END + best_model_name)
        best_model_dict = self.ml_dict[best_model_name]
        if best_model_dict is not None:
            cv_scores = best_model_dict.get(self.score_type)
            # An empty CV-score list means the model never produced folds;
            # report an infinite (i.e. unusable) mean score in that case.
            if len(cv_scores) == 0:
                mean_cv_score = np.inf
            else:
                mean_cv_score = get_mean_cv_score(cv_scores)
            print(" Best Model (Mean CV) Score: %0.2f" % mean_cv_score)
            #self.ml_dict[best_model_name][self.score_type])

        end = time()
        elapsed = end-start
        print("\n\n" + "-"*50)
        print(f"Total time taken: {elapsed:.0f} seconds.")
        print("-"*50 + "\n\n")
        print("Leaderboard with best model on top of list:\n",self.get_leaderboard())
        return self

    def get_best_model_name(self) -> str:
        """
        Returns the name of the trained model with the lowest mean CV score.
        """
        f1_stats = {}
        for key, _ in self.ml_dict.items():
            cv_scores = self.ml_dict[key][self.score_type]
            # Standardize to a list: scores may be an ndarray, a plain list,
            # or a single scalar depending on which builder produced them.
            if isinstance(cv_scores, np.ndarray):
                cv_scores = cv_scores.tolist()
            if not isinstance(cv_scores, List):
                cv_scores = [cv_scores]
            if len(cv_scores) == 0:
                # No folds available -> treat the model as unusable.
                f1_stats[key] = np.inf
            else:
                f1_stats[key] = sum(cv_scores)/len(cv_scores)
        # Lower score is better (rmse / normalized rmse).
        best_model_name = min(f1_stats.items(), key=operator.itemgetter(1))[0]
        return best_model_name

    def get_best_model(self):
        """
        Returns the best model after training
        """
        return self.ml_dict.get(self.get_best_model_name()).get('model')

    def get_model(self, model_name: str):
        """
        Returns the specified model, or None (with a message) if it was
        never trained.
        """
        if self.ml_dict.get(model_name) is not None:
            return self.ml_dict.get(model_name).get('model')
        else:
            print(f"Model with name '{model_name}' does not exist.")
            return None

    def get_best_model_build(self):
        """
        Returns the builder object ('model_build') of the best model
        after training.
        """
        return self.ml_dict.get(self.get_best_model_name()).get('model_build')

    def get_model_build(self, model_name: str):
        """
        Returns the builder object ('model_build') of the specified model.
        """
        if
self.ml_dict.get(model_name) is not None: return self.ml_dict.get(model_name).get('model_build') else: print(f"Model with name '{model_name}' does not exist.") return None def get_ml_dict(self): """ Returns the entire ML Dictionary """ return self.ml_dict def predict( self, testdata, model: str = '', simple: bool = False, ): """ Predict the results """ if isinstance(model, str): if model == '': bestmodel = self.get_best_model_build() elif model.lower() == 'best': bestmodel = self.get_best_model_build() else: if self.get_model_build(model) is not None: bestmodel = self.get_model_build(model) else: print(f"(Error) Model of type '{model}' does not exist. No predictions will be made.") return None self.model = bestmodel else: ### if no model is specified, just use the best model ### bestmodel = self.get_best_model_build() self.model = bestmodel if isinstance(testdata, pd.Series) or isinstance(testdata, pd.DataFrame): # During training, we internally converted a column datetime index to the dataframe date time index # We need to do the same while predicing for consistence if (model == 'ML') or self.get_best_model_name() == 'ML' or (model == 'best' and self.get_best_model_name() == 'ML'): if self.ts_column in testdata.columns: testdata.set_index(self.ts_column, inplace=True) elif self.ts_column
Name: 媒体文件名称。 :type Name: str :param Description: 媒体文件描述。 :type Description: str :param CreateTime: 媒体文件的创建时间,使用 [ISO 日期格式](https://cloud.tencent.com/document/product/266/11732#I)。 :type CreateTime: str :param UpdateTime: 媒体文件的最近更新时间(如修改视频属性、发起视频处理等会触发更新媒体文件信息的操作),使用 [ISO 日期格式](https://cloud.tencent.com/document/product/266/11732#I)。 :type UpdateTime: str :param ExpireTime: 媒体文件的过期时间,使用 [ISO 日期格式](https://cloud.tencent.com/document/product/266/11732#I)。过期后该媒体文件及其相关资源(转码结果、雪碧图等)将被永久删除。“9999-12-31T23:59:59Z”表示永不过期。 :type ExpireTime: str :param ClassId: 媒体文件的分类 ID。 :type ClassId: int :param ClassName: 媒体文件的分类名称。 :type ClassName: str :param ClassPath: 媒体文件的分类路径,分类间以“-”分隔,如“新的一级分类 - 新的二级分类”。 :type ClassPath: str :param CoverUrl: 媒体文件的封面图片地址。 :type CoverUrl: str :param Type: 媒体文件的封装格式,例如 mp4、flv 等。 :type Type: str :param MediaUrl: 原始媒体文件的 URL 地址。 :type MediaUrl: str :param SourceInfo: 该媒体文件的来源信息。 注意:此字段可能返回 null,表示取不到有效值。 :type SourceInfo: :class:`tencentcloud.vod.v20180717.models.MediaSourceData` :param StorageRegion: 媒体文件存储地区,如 ap-chongqing,参见[地域列表](https://cloud.tencent.com/document/product/266/9760#.E5.B7.B2.E6.94.AF.E6.8C.81.E5.9C.B0.E5.9F.9F.E5.88.97.E8.A1.A8)。 :type StorageRegion: str :param TagSet: 媒体文件的标签信息。 :type TagSet: list of str :param Vid: 直播录制文件的唯一标识。 :type Vid: str :param Category: 文件类型: <li>Video: 视频文件</li> <li>Audio: 音频文件</li> <li>Image: 图片文件</li> :type Category: str :param Status: 文件状态:Normal:正常,Forbidden:封禁。 *注意:此字段暂不支持。 :type Status: str :param StorageClass: 媒体文件的存储类别: <li>STANDARD:标准存储。</li> <li>STANDARD_IA:低频存储。</li> :type StorageClass: str """ self.Name = None self.Description = None self.CreateTime = None self.UpdateTime = None self.ExpireTime = None self.ClassId = None self.ClassName = None self.ClassPath = None self.CoverUrl = None self.Type = None self.MediaUrl = None self.SourceInfo = None self.StorageRegion = None self.TagSet = None self.Vid = None self.Category = None self.Status = None self.StorageClass = None def _deserialize(self, 
params): self.Name = params.get("Name") self.Description = params.get("Description") self.CreateTime = params.get("CreateTime") self.UpdateTime = params.get("UpdateTime") self.ExpireTime = params.get("ExpireTime") self.ClassId = params.get("ClassId") self.ClassName = params.get("ClassName") self.ClassPath = params.get("ClassPath") self.CoverUrl = params.get("CoverUrl") self.Type = params.get("Type") self.MediaUrl = params.get("MediaUrl") if params.get("SourceInfo") is not None: self.SourceInfo = MediaSourceData() self.SourceInfo._deserialize(params.get("SourceInfo")) self.StorageRegion = params.get("StorageRegion") self.TagSet = params.get("TagSet") self.Vid = params.get("Vid") self.Category = params.get("Category") self.Status = params.get("Status") self.StorageClass = params.get("StorageClass") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaClassInfo(AbstractModel): """分类信息描述 """ def __init__(self): """ :param ClassId: 分类 ID :type ClassId: int :param ParentId: 父类 ID,一级分类的父类 ID 为 -1。 :type ParentId: int :param ClassName: 分类名称 :type ClassName: str :param Level: 分类级别,一级分类为 0,最大值为 3,即最多允许 4 级分类层次。 :type Level: int :param SubClassIdSet: 当前分类的第一级子类 ID 集合 :type SubClassIdSet: list of int """ self.ClassId = None self.ParentId = None self.ClassName = None self.Level = None self.SubClassIdSet = None def _deserialize(self, params): self.ClassId = params.get("ClassId") self.ParentId = params.get("ParentId") self.ClassName = params.get("ClassName") self.Level = params.get("Level") self.SubClassIdSet = params.get("SubClassIdSet") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaContentReviewAsrTextSegmentItem(AbstractModel): """内容审核 Asr 文字审核嫌疑片段 """ def __init__(self): """ :param StartTimeOffset: 嫌疑片段起始的偏移时间,单位:秒。 :type StartTimeOffset: float :param EndTimeOffset: 嫌疑片段结束的偏移时间,单位:秒。 :type EndTimeOffset: float :param Confidence: 嫌疑片段置信度。 :type Confidence: float :param Suggestion: 嫌疑片段审核结果建议,取值范围: <li>pass。</li> <li>review。</li> <li>block。</li> :type Suggestion: str :param KeywordSet: 嫌疑关键词列表。 :type KeywordSet: list of str """ self.StartTimeOffset = None self.EndTimeOffset = None self.Confidence = None self.Suggestion = None self.KeywordSet = None def _deserialize(self, params): self.StartTimeOffset = params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") self.Confidence = params.get("Confidence") self.Suggestion = params.get("Suggestion") self.KeywordSet = params.get("KeywordSet") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaContentReviewOcrTextSegmentItem(AbstractModel): """内容审核 Ocr 文字审核嫌疑片段 """ def __init__(self): """ :param StartTimeOffset: 嫌疑片段起始的偏移时间,单位:秒。 :type StartTimeOffset: float :param EndTimeOffset: 嫌疑片段结束的偏移时间,单位:秒。 :type EndTimeOffset: float :param Confidence: 嫌疑片段置信度。 :type Confidence: float :param Suggestion: 嫌疑片段审核结果建议,取值范围: <li>pass。</li> <li>review。</li> <li>block。</li> :type Suggestion: str :param KeywordSet: 嫌疑关键词列表。 :type KeywordSet: list of str :param AreaCoordSet: 嫌疑文字出现的区域坐标 (像素级),[x1, y1, x2, y2],即左上角坐标、右下角坐标。 :type AreaCoordSet: list of int :param Url: 嫌疑图片 URL (图片不会永久存储,到达 PicUrlExpireTime 时间点后图片将被删除)。 :type Url: str :param PicUrlExpireTime: 嫌疑图片 URL 失效时间,使用 [ISO 日期格式](https://cloud.tencent.com/document/product/266/11732#I)。 :type PicUrlExpireTime: str """ self.StartTimeOffset = None self.EndTimeOffset = None self.Confidence = None self.Suggestion = None self.KeywordSet = None self.AreaCoordSet = None self.Url = None self.PicUrlExpireTime = None def _deserialize(self, params): self.StartTimeOffset = params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") self.Confidence = params.get("Confidence") self.Suggestion = params.get("Suggestion") self.KeywordSet = params.get("KeywordSet") self.AreaCoordSet = params.get("AreaCoordSet") self.Url = params.get("Url") self.PicUrlExpireTime = params.get("PicUrlExpireTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaContentReviewPoliticalSegmentItem(AbstractModel): """内容审核涉政嫌疑片段 """ def __init__(self): """ :param StartTimeOffset: 嫌疑片段起始的偏移时间,单位:秒。 :type StartTimeOffset: float :param EndTimeOffset: 嫌疑片段结束的偏移时间,单位:秒。 :type EndTimeOffset: float :param Confidence: 嫌疑片段涉政分数。 :type Confidence: float :param Suggestion: 嫌疑片段鉴政结果建议,取值范围: <li>pass。</li> <li>review。</li> <li>block。</li> :type Suggestion: str :param Name: 涉政人物、违规图标名字。 :type Name: str :param Label: 嫌疑片段鉴政结果标签。内容审核模板[画面鉴政任务控制参数](https://cloud.tencent.com/document/api/266/31773#PoliticalImgReviewTemplateInfo)里 LabelSet 参数与此参数取值范围的对应关系: violation_photo: <li>violation_photo:违规图标。</li> politician: <li>nation_politician:国家领导人;</li> <li>province_politician: 省部级领导人;</li> <li>bureau_politician:厅局级领导人;</li> <li>county_politician:县处级领导人;</li> <li>rural_politician:乡科级领导人;</li> <li>sensitive_politician:敏感政治人物;</li> <li>foreign_politician:国外领导人。</li> entertainment: <li>sensitive_entertainment:敏感娱乐人物。</li> sport: <li>sensitive_sport:敏感体育人物。</li> entrepreneur: <li>sensitive_entrepreneur:敏感商业人物。</li> scholar: <li>sensitive_scholar:敏感教育学者。</li> celebrity: <li>sensitive_celebrity:敏感知名人物;</li> <li>historical_celebrity:历史知名人物。</li> military: <li>sensitive_military:敏感军事人物。</li> :type Label: str :param Url: 嫌疑图片 URL (图片不会永久存储,到达 PicUrlExpireTime 时间点后图片将被删除)。 :type Url: str :param AreaCoordSet: 涉政人物、违规图标出现的区域坐标 (像素级),[x1, y1, x2, y2],即左上角坐标、右下角坐标。 :type AreaCoordSet: list of int :param PicUrlExpireTimeStamp: 该字段已废弃,请使用 PicUrlExpireTime。 :type PicUrlExpireTimeStamp: int :param PicUrlExpireTime: 嫌疑图片 URL 失效时间,使用 [ISO 日期格式](https://cloud.tencent.com/document/product/266/11732#I)。 :type PicUrlExpireTime: str """ self.StartTimeOffset = None self.EndTimeOffset = None self.Confidence = None self.Suggestion = None self.Name = None self.Label = None self.Url = None self.AreaCoordSet = None self.PicUrlExpireTimeStamp = None self.PicUrlExpireTime = None def _deserialize(self, params): self.StartTimeOffset = 
params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") self.Confidence = params.get("Confidence") self.Suggestion = params.get("Suggestion") self.Name = params.get("Name") self.Label = params.get("Label") self.Url = params.get("Url") self.AreaCoordSet = params.get("AreaCoordSet") self.PicUrlExpireTimeStamp = params.get("PicUrlExpireTimeStamp") self.PicUrlExpireTime = params.get("PicUrlExpireTime") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." % ",".join(memeber_set)) class MediaContentReviewSegmentItem(AbstractModel): """内容审核涉黄/暴恐嫌疑片段 """ def __init__(self): """ :param StartTimeOffset: 嫌疑片段起始的偏移时间,单位:秒。 :type StartTimeOffset: float :param EndTimeOffset: 嫌疑片段结束的偏移时间,单位:秒。 :type EndTimeOffset: float :param Confidence: 嫌疑片段涉黄分数。 :type Confidence: float :param Label: 嫌疑片段鉴黄结果标签。 :type Label: str :param Suggestion: 嫌疑片段鉴黄结果建议,取值范围: <li>pass。</li> <li>review。</li> <li>block。</li> :type Suggestion: str :param Url: 嫌疑图片 URL (图片不会永久存储,到达 PicUrlExpireTime 时间点后图片将被删除)。 :type Url: str :param PicUrlExpireTimeStamp: 该字段已废弃,请使用 PicUrlExpireTime。 :type PicUrlExpireTimeStamp: int :param PicUrlExpireTime: 嫌疑图片 URL 失效时间,使用 [ISO 日期格式](https://cloud.tencent.com/document/product/266/11732#I)。 :type PicUrlExpireTime: str """ self.StartTimeOffset = None self.EndTimeOffset = None self.Confidence = None self.Label = None self.Suggestion = None self.Url = None self.PicUrlExpireTimeStamp = None self.PicUrlExpireTime = None def _deserialize(self, params): self.StartTimeOffset = params.get("StartTimeOffset") self.EndTimeOffset = params.get("EndTimeOffset") self.Confidence = params.get("Confidence") self.Label = params.get("Label") self.Suggestion = params.get("Suggestion") self.Url = params.get("Url") self.PicUrlExpireTimeStamp = params.get("PicUrlExpireTimeStamp") self.PicUrlExpireTime = params.get("PicUrlExpireTime") memeber_set = 
set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            # Warn about response fields not modeled on this class.
            # ("memeber"/"fileds" typos are in the original generated SDK
            # identifiers/strings; left unchanged.)
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))


class MediaDeleteItem(AbstractModel):
    """Content to delete when deleting a VOD video.

    (Docstrings translated from the original Chinese.)
    """

    def __init__(self):
        """
        :param Type: The specified part to delete. This parameter is invalid if the field is left empty. Valid values:
<li>TranscodeFiles: delete transcoded files.</li>
<li>WechatPublishFiles: delete WeChat publishing files.</li>
        :type Type: str
        :param Definition: ID of the video template, within the category given by Type, whose outputs are to be deleted. For template definitions see [transcoding templates](https://cloud.tencent.com/document/product/266/33478#.3Cspan-id-.3D-.22zm.22-.3E.3C.2Fspan.3E.E8.BD.AC.E7.A0.81.E6.A8.A1.E6.9D.BF).
Default value 0, meaning delete all videos of the category given by Type.
        :type Definition: int
        """
        self.Type = None
        self.Definition = None

    def _deserialize(self, params):
        self.Type = params.get("Type")
        self.Definition = params.get("Definition")
        # Warn about response fields not modeled on this class.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))


class MediaImageSpriteInfo(AbstractModel):
    """Image sprite information of a VOD file.

    (Docstrings translated from the original Chinese.)
    """

    def __init__(self):
        """
        :param ImageSpriteSet: Collection of image sprite info of specific specifications; each element represents one set of image sprites of the same specification.
        :type ImageSpriteSet: list of MediaImageSpriteItem
        """
        self.ImageSpriteSet = None

    def _deserialize(self, params):
        if params.get("ImageSpriteSet") is not None:
            self.ImageSpriteSet = []
            for item in params.get("ImageSpriteSet"):
                obj = MediaImageSpriteItem()
                obj._deserialize(item)
                self.ImageSpriteSet.append(obj)
        # Warn about response fields not modeled on this class.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless."
% ",".join(memeber_set)) class MediaImageSpriteItem(AbstractModel): """雪碧图信息 """ def __init__(self): """ :param Definition: 雪碧图规格,参见[雪碧图参数模板](https://cloud.tencent.com/document/product/266/33480#.E9.9B.AA.E7.A2.A7.E5.9B.BE.E6.A8.A1.E6.9D.BF)。 :type Definition: int :param Height: 雪碧图小图的高度。 :type Height: int :param Width: 雪碧图小图的宽度。 :type Width: int :param TotalCount: 每一张雪碧图大图里小图的数量。 :type TotalCount: int :param ImageUrlSet: 每一张雪碧图大图的地址。 :type ImageUrlSet: list of str :param WebVttUrl: 雪碧图子图位置与时间关系的 WebVtt 文件地址。WebVtt 文件表明了各个雪碧图小图对应的时间点,以及在雪碧大图里的坐标位置,一般被播放器用于实现预览。 :type WebVttUrl: str """ self.Definition = None self.Height = None self.Width = None self.TotalCount = None self.ImageUrlSet = None self.WebVttUrl = None def _deserialize(self, params): self.Definition = params.get("Definition") self.Height = params.get("Height") self.Width = params.get("Width") self.TotalCount = params.get("TotalCount") self.ImageUrlSet = params.get("ImageUrlSet") self.WebVttUrl = params.get("WebVttUrl") memeber_set = set(params.keys()) for name, value in vars(self).items(): if name in memeber_set: memeber_set.remove(name) if len(memeber_set) > 0: warnings.warn("%s fileds are useless." 
% ",".join(memeber_set)) class MediaInfo(AbstractModel): """点播文件信息 """ def __init__(self): """ :param BasicInfo: 基础信息。包括视频名称、分类、播放地址、封面图片等。 注意:此字段可能返回 null,表示取不到有效值。 :type BasicInfo: :class:`tencentcloud.vod.v20180717.models.MediaBasicInfo` :param MetaData: 元信息。包括大小、时长、视频流信息、音频流信息等。 注意:此字段可能返回 null,表示取不到有效值。 :type MetaData: :class:`tencentcloud.vod.v20180717.models.MediaMetaData` :param TranscodeInfo: 转码结果信息。包括该视频转码生成的各种码率的视频的地址、规格、码率、分辨率等。 注意:此字段可能返回 null,表示取不到有效值。 :type TranscodeInfo: :class:`tencentcloud.vod.v20180717.models.MediaTranscodeInfo` :param AnimatedGraphicsInfo: 转动图结果信息。对视频转动图(如 gif)后,动图相关信息。 注意:此字段可能返回 null,表示取不到有效值。 :type AnimatedGraphicsInfo: :class:`tencentcloud.vod.v20180717.models.MediaAnimatedGraphicsInfo` :param SampleSnapshotInfo: 采样截图信息。对视频采样截图后,相关截图信息。 注意:此字段可能返回 null,表示取不到有效值。 :type SampleSnapshotInfo: :class:`tencentcloud.vod.v20180717.models.MediaSampleSnapshotInfo` :param ImageSpriteInfo: 雪碧图信息。对视频截取雪碧图之后,雪碧的相关信息。 注意:此字段可能返回 null,表示取不到有效值。 :type ImageSpriteInfo: :class:`tencentcloud.vod.v20180717.models.MediaImageSpriteInfo` :param SnapshotByTimeOffsetInfo: 指定时间点截图信息。对视频依照指定时间点截图后,各个截图的信息。 注意:此字段可能返回 null,表示取不到有效值。 :type SnapshotByTimeOffsetInfo: :class:`tencentcloud.vod.v20180717.models.MediaSnapshotByTimeOffsetInfo` :param KeyFrameDescInfo: 视频打点信息。对视频设置的各个打点信息。 注意:此字段可能返回 null,表示取不到有效值。 :type KeyFrameDescInfo: :class:`tencentcloud.vod.v20180717.models.MediaKeyFrameDescInfo` :param AdaptiveDynamicStreamingInfo: 转自适应码流信息。包括规格、加密类型、打包格式等相关信息。 注意:此字段可能返回 null,表示取不到有效值。 :type AdaptiveDynamicStreamingInfo: :class:`tencentcloud.vod.v20180717.models.MediaAdaptiveDynamicStreamingInfo` :param MiniProgramReviewInfo: 小程序审核信息。 注意:此字段可能返回 null,表示取不到有效值。 :type MiniProgramReviewInfo: :class:`tencentcloud.vod.v20180717.models.MediaMiniProgramReviewInfo` :param SubtitleInfo: 字幕信息。 注意:此字段可能返回 null,表示取不到有效值。 :type SubtitleInfo: :class:`tencentcloud.vod.v20180717.models.MediaSubtitleInfo` :param FileId: 媒体文件唯一标识 ID。 :type FileId: str """ self.BasicInfo = 
None self.MetaData = None self.TranscodeInfo = None self.AnimatedGraphicsInfo = None self.SampleSnapshotInfo = None self.ImageSpriteInfo = None self.SnapshotByTimeOffsetInfo = None self.KeyFrameDescInfo = None self.AdaptiveDynamicStreamingInfo
np.min(rres)
        else:
            # Secondary-eclipse duration/depth from the in-eclipse samples
            # (flux below 1); zero duration if fewer than 2 points qualify.
            self.se_dur = tt[(rres<1.)][-1] - tt[(rres<1.)][0] if np.sum(rres<1.)>1 else 0
            self.se_depth = np.min(rres)
        return np.mean(tt, axis=1), np.mean(rres, axis=1)

    def rvprep(self, t, rv1, rv2, drv1, drv2):
        """Stores observed radial velocity data points

        Parameters
        ----------
        t : float array or scalar
            times of observations
        rv1 : float array or scalar
            RV of primary in m/s
        rv2 : float array or scalar
            RV of secondary in m/s
        drv1 : float array or scalar
            RV err of primary in m/s
        drv2 : float array or scalar
            RV err of secondary in m/s

        Returns
        -------
        m1, m2, k0 : tuple
            guesses for the masses and systemic velocity of the binary
            from the RV semi-amplitudes
        """
        self.rv1_obs = rv1
        self.rv2_obs = rv2
        self.rv1_err_obs = drv1
        self.rv2_err_obs = drv2
        self.rv_t = t
        # Flag unusable points: NaN values or zero uncertainties.
        self.bad1 = np.isnan(self.rv1_obs) | np.isnan(self.rv1_err_obs) | (self.rv1_err_obs == 0.)
        self.bad2 = np.isnan(self.rv2_obs) | np.isnan(self.rv2_err_obs) | (self.rv2_err_obs == 0.)
        # Semi-amplitude guesses: half the peak-to-peak spread of each RV curve.
        k1 = (np.nanmax(self.rv1_obs[~self.bad1]) - np.nanmin(self.rv1_obs[~self.bad1]))/2.
        k2 = (np.nanmax(self.rv2_obs[~self.bad2]) - np.nanmin(self.rv2_obs[~self.bad2]))/2.
        # Systemic-velocity guess: median of all good RV points from both stars.
        k0 = np.nanmedian(np.append(self.rv1_obs[~self.bad1], self.rv2_obs[~self.bad2]))
        m1, m2 = self.rvpars_guess_mass(k1, k2, self.pars['period'], np.sqrt(self.pars['esinw']**2 + self.pars['ecosw']**2))
        return m1, m2, k0

    def rvfit(self, rvpars, t):
        """Computes the radial velocities of each binary component.

        Parameters
        ----------
        rvpars : float array or list
            msum, mrat, period, tpe, esinw, ecosw, inc, k0, rverr
        t : float array or scalar
            times of observations to compute RV

        Returns
        -------
        vr1, vr2 : tuple
            RVs in m/s, where vr1 and vr2 are of shape/type of input t
        """
        msum, mrat, period, tpe, esinw, ecosw, inc, k0, rverr = rvpars
        # Eccentricity / argument of periastron from the (esinw, ecosw)
        # parameterization.
        e = np.sqrt(esinw**2+ecosw**2)
        omega = np.arctan2(esinw, ecosw)
        inc = inc % TWOPI
        fpe = np.pi/2.
- omega m1, m2 = self.sumrat_to_12(msum, mrat) self.updatepars(msum=msum, mrat=mrat, m1=m1, m2=m2, period=period, tpe=tpe, esinw=esinw, ecosw=ecosw, k0=k0, rverr=rverr, inc=inc) # t0 = tpe - (-np.sqrt(1.-e**2) * period / (2.*np.pi)) * \ # (e*np.sin(fpe)/(1.+e*np.cos(fpe)) - 2.*(1.-e**2)**(-0.5) * \ # np.arctan(np.sqrt(1.-e**2) * np.tan((fpe)/2.) / (1.+e))) t0 = tpe - self.sudarsky(fpe, e, period) maf = rsky(e, period, t0, 1e-8, t) amp = 29794.509 / np.sqrt(1-e**2) * (period/d2y)**(-1/3.) / (m1+m2)**(2/3.) vr2 = -amp * m1 * np.sin(inc+np.pi) * \ (np.cos(omega+maf) + e * np.cos(omega)) omega+=np.pi # periapse of primary is 180 offset vr1 = -amp * m2 * np.sin(inc+np.pi) * \ (np.cos(omega+maf) + e * np.cos(omega)) return vr1+k0, vr2+k0 def lcfit(self, lcpars, jd, quarter, flux, dflux, crowd, polyorder=2, ooe=True): """Computes light curve model Parameters ---------- lcpars : float array parameters for LC fitting: msum, rsum, rratio, period, tpe, esinw, ecosw, b, frat, q1, q2, q3, q4 jd : float array time array quarter : float array corresponding kepler quarter for a given time flux : float array observed flux dflux : float array flux error crowd : float array array of crowding values (additional flux) polyorder : int order of polynomial to detrend lightcurve Returns ------- totmod : float array array of model fluxes totpol : float array array of polynomials for detrending """ # r1, r2, frat derive from m1, m2, z0, t0, dist, E(B-V), scaleheight msum, rsum, rrat, period, tpe, esinw, ecosw, b, frat, \ q1, q2, q3, q4 = lcpars # LD transformations (Kipping 2013) c1 = 2.*np.sqrt(q1)*q2 c2 = np.sqrt(q1)*(1.-2.*q2) c3 = 2.*np.sqrt(q3)*q4 c4 = np.sqrt(q3)*(1.-2.*q4) ldcoeffs1 = np.array([c1, c2]) ldcoeffs2 = np.array([c3, c4]) # if r2 > r1: # r1, r2 = r2, r1 # m1, m2 = m2, m1 # frat = 1./frat omega=np.arctan2(esinw,ecosw) e=np.sqrt(esinw**2+ecosw**2) # nip it at the bud. 
if (e>=1.): #print "e>=1", e return -np.inf, -np.inf # r1 = rsum/(1.+rrat) # r2 = rsum/(1.+1./rrat) r1, r2 = self.sumrat_to_12(rsum, rrat) a = self.get_a(period, msum) inc = self.get_inc(b, r1, a) % TWOPI #inc = np.arccos(b*r1/(a/r2au)) if np.isnan(inc): #print "inc is nan", inc return -np.inf, -np.inf self.updatepars(msum=msum, rsum=rsum, rrat=rrat, period=period, tpe=tpe, esinw=esinw, ecosw=ecosw, b=b, q1=q1, q2=q2, q3=q3, q4=q4, frat=frat, r1=r1, r2=r2, inc=inc) fpe = np.pi/2. - omega fse = -np.pi/2. - omega # transform time of center of PE to time of periastron (t0) # from Eq 9 of Sudarsky et al (2005) t0 = tpe - self.sudarsky(fpe, e, period) tse = t0 + self.sudarsky(fse, e, period) # t0 = tpe - (-np.sqrt(1.-e**2) * period / (2.*np.pi)) * \ # (e*np.sin(fpe)/(1.+e*np.cos(fpe)) - 2.*(1.-e**2)**(-0.5) * \ # np.arctan(np.sqrt(1.-e**2) * np.tan((fpe)/2.) / (1.+e))) # tse = t0 + (-np.sqrt(1.-e**2) * period / (2.*np.pi)) * \ # (e*np.sin(fse)/(1.+e*np.cos(fse)) - 2.*(1.-e**2)**(-0.5) * \ # np.arctan(np.sqrt(1.-e**2) * np.tan((fse)/2.) / (1.+e))) self.tpe = tpe self.tse = tse # if tse<tpe: # tse+=period tempt1, tempres1 = self.lctemplate(lcpars, period, omega, e, a, inc, r1, ldcoeffs1, r2/r1, tpe, t0, cadence = self.cadence, exp = self.exp, pe=True) tempt2, tempres2 = self.lctemplate(lcpars, period, omega, e, a, inc, r2, ldcoeffs2, r1/r2, tse, t0, cadence = self.cadence, exp = self.exp, pe=False) if np.any(np.isinf(tempt1)) or np.any(np.isinf(tempt2)): return -np.inf, -np.inf tempt1 = tempt1 % period tempt2 = tempt2 % period tempres1 = (tempres1 - 1.)/(1. + frat) + 1. tempres2 = (tempres2 - 1.)/(1. + 1./frat) + 1. sorting1 = np.argsort(tempt1) sorting2 = np.argsort(tempt2) tempres1 = tempres1[sorting1] tempt1 = tempt1[sorting1] tempres2 = tempres2[sorting2] tempt2 = tempt2[sorting2] #not including crowdsap term. #tempres1 = (tempres1 + frat) / (1.+frat) #tempres2 = (tempres2 * frat + 1.) / (1. 
+ frat) totmod, totpol = np.ones(len(jd)), np.ones(len(jd)) maf = rsky(e, period, t0, 1e-8, jd) r = a*(1.-e**2) / (1.+e*np.cos(maf)) zcomp = np.sin(omega+maf) * np.sin(inc) pe = ((r*zcomp>0.)) #& (z <= 1.05*(r1+r2)*r2au)) se = ((r*zcomp<0.)) #& (z <= 1.05*(r1+r2)*r2au)) tt = jd % period if pe.any(): totmod[pe] = np.interp(tt[pe], tempt1, tempres1) totmod[pe] = (totmod[pe] - 1.) * crowd[pe] + 1. if se.any(): totmod[se] = np.interp(tt[se], tempt2, tempres2) totmod[se] = (totmod[se] - 1.) * crowd[se] + 1. if polyorder>0: if (self.sep-self.clip_tol*(self.pwidth+self.swidth) < self.pwidth): chunk = np.array(np.where(np.diff(jd) > np.median(np.diff(jd))*4.))[0] else: chunk = np.array(np.where(np.diff(jd) > self.pwidth*period))[0] #put in dummy first and last element # placeholders chunk = np.append(chunk, len(jd)-2).flatten() _, chunk3 = np.unique(np.searchsorted(jd[chunk], jd), return_index=True) chunk=chunk3 chunk[-1]+=1 chunk = np.unique(np.sort(np.append(chunk, np.where(np.diff(quarter)>0)[0]+1))) totpol = poly_lc_cwrapper(jd, flux, dflux, totmod, chunk, porder=polyorder, ooe=ooe) # phase = ((jd - tpe) % period) / period # sorting = np.argsort(phase) # nopoly = (totpol[sorting] == 1.) 
# if (np.sum(nopoly)>0) and (np.sum(nopoly)<len(totpol)*0.1): # _totpol = totpol[sorting] # tmp = np.interp(phase[sorting][nopoly], phase[sorting][~nopoly], flux[sorting][~nopoly]/totpol[sorting][~nopoly]) # #print np.sum(nopoly), np.sum(~nopoly) # _totpol[nopoly] = flux[sorting][nopoly] / tmp # totpol[sorting] = _totpol return totmod, totpol @staticmethod def ephem(N, period, tpe): return np.arange(N)*period + tpe def ilnlike(self, fisopars, lc_constraints=None, ebv_dist=None, ebv_arr=None, residual=False, retpars=False): """Computes log likelihood of isochrone fit portion of KEBLAT Parameters ---------- fisopars : dict or float array either from lmfit parameter class or just an array of vals residual: boolean True if want to return residual array False if return loglikelihood val Returns ------- loglike : float returns -np.inf if model mags have invalid values isores : float array if residual = True """ if self.coeval: parnames = parnames_dict['sed'] else: parnames = parnames_dict['sed2'] isopars=np.empty(len(parnames)) for jj in range(len(parnames)): try: isopars[jj] = fisopars[parnames[jj]].value except KeyError: isopars[jj] = 0.0 except ValueError: isopars[jj] = fisopars[jj] except IndexError: isopars[jj] = fisopars[jj] if ebv_arr is not None: isopars[5] = np.interp(isopars[4], ebv_dist, ebv_arr) if retpars: return isopars # m1 = isopars[0] / (1. + isopars[1]) # m2 = isopars[0] / (1. 
+ 1./isopars[1]) # isopars[0], isopars[1] = m1, m2 # print m1, m2, isopars[0], isopars[1] isoerr = np.exp(isopars[-1]) magsmod = self.isofit(isopars) #/ np.sqrt(self.emagsobs**2 + isoerr**2) if np.isinf(self.r2) or np.isinf(self.r1): if lc_constraints is None: lc_block = np.array([]) else: lc_block = lc_constraints if residual: return np.ones(len(self.magsobs) + len(lc_block))*1e12 else: return -np.inf, str((0,0,0)) if np.any(np.isinf(magsmod)): if residual: return np.ones(len(self.magsobs) + len(lc_constraints))*1e12 return -np.inf, str((0,0,0)) lc_inputs = np.array([(self.r1+self.r2)/(isopars[0])**(1./3.), self.r2/self.r1, self.frat]) if lc_constraints is None: lc_priors = np.array([]) else: lc_priors = (lc_inputs-lc_constraints)/(np.array([0.05, 0.02, 0.02]) * lc_constraints) # lc_uncertainty = np.array([0.002, 0.002, 0.002]) isores = np.concatenate(((magsmod - self.magsobs) / np.sqrt(self.emagsobs**2 + isoerr**2), lc_priors)) #/ np.sqrt(self.emagsobs**2 + isoerr**2) for ii, dii, jj in zip([self.armstrongT1, self.armstrongT2], [self.armstrongdT1, self.armstrongdT2], [10**self.temp1, 10**self.temp2]): if ii is not None: if dii is None: dii=0.05*ii isores = np.append(isores, (ii-jj)/dii) if residual: return isores chisq
WASM_OP_Code.section_code_dict['type']) tmp_obj.insert(0, "01") # tmp_obj.insert(0, '01') self.Obj_Header = tmp_obj def PrintTypeHeaderObj(self): # print(self.Obj_Header) for byte in self.Obj_Header: print(byte) def Dump_Obj_STDOUT(self): for bytecode in self.Obj_file: print(bytecode) # reads a wasm-obj file, returns a parsedstruct that holds all the sections' # bytecode, their section type and their length def ReadWASM(file_path, endianness, is_extended_isa, dbg): temp_obj_file = [] wasm_file = open(file_path, "rb") parsedstruct = ParsedStruct() # read the magic cookie byte = wasm_file.read(WASM_OP_Code.uint32) if byte != WASM_OP_Code.magic_number.to_bytes(WASM_OP_Code.uint32, byteorder=endianness, signed=False): raise Exception("bad magic cookie") # read the version number byte = wasm_file.read(WASM_OP_Code.uint32) if byte != WASM_OP_Code.version_number.to_bytes(WASM_OP_Code.uint32, byteorder=endianness, signed=False): raise Exception("bad version number") else: parsedstruct.version_number = byte while True: byte = wasm_file.read(1) if byte != b'': temp_obj_file.append(int.from_bytes(byte, byteorder='big', signed=False)) else: break offset = 0 loop = True while loop: try: # section_id, offset, dummy = Read(temp_obj_file, offset, 'varuint7') section_id, offset, dummy = Read(temp_obj_file, offset, 'varuint32') except IndexError: break payload_length, offset, dummy = Read(temp_obj_file, offset, 'varuint32') if section_id == 0: is_custom_section = True name_len, offset, dummy = Read(temp_obj_file, offset, 'varuint32') name = temp_obj_file[offset : offset + name_len] offset += name_len if name.find("reloc", 0, 5) == 0: is_reloc_section = True reloc_entry_count = Read(temp_obj_file, offset, 'varuint32') for i in range(0, reloc_entry_count): reloc_entry, offset, dummy = Read(tmp_obj, offset, 'varuint32') reloc_entries.append(reloc_entry) else: is_custom_section = False name_len = 0 name = '' dummy = 0 payload_data = temp_obj_file[offset:offset + payload_length - 
name_len - dummy] offset += payload_length - name_len - dummy # @DEVI-the second field is for general use. it is unused right # now so we are filling it with jojo. parsedstruct.section_list.append([section_id, 'jojo', payload_length, is_custom_section, name_len, name, payload_data]) # prints out the sections in the wasm object for section in parsedstruct.section_list: pass #print(section) wasm_file.close() return(parsedstruct) # Receives a parsedstruct returned from ReadWASM, parses all the sections and # fills up a module class. the parse method, then can return the module. # the returned class objects are all defined in section_structs.py. class ObjReader(object): def __init__(self, parsedstruct): self.parsedstruct = parsedstruct # we use this method to read the operands of instructions. it's only # called by ReadCodeSection def Disassemble(self, section_byte, offset): # @DEVI-FIXME- not sure why i was using instruction. its a string... matched = False read_bytes = 0 read_bytes_temp = 0 read_bytes_temp_iter = 0 instruction = str() operands = [] temp_wasm_ins = WASM_Ins() # @DEVI-FIXME-for v1.0 opcodes. needs to get fixed for extended # op-codes. ideally the mosule should hold its version number so we can # check it here. byte = format(section_byte[6][offset], '02x') offset += 1 read_bytes += 1 for op_code in WASM_OP_Code.all_ops: if op_code[1] == byte: matched = True # br_table has special immediates # @DEVI-FIXME-this is costing us quite dearly for every opcode # we read(at least two ticks per opcode). I could have the # br_table opcode done separately but kinda hurts the codes # uniformity. anyways. 
if op_code[1] == '0e': matched = True temp, offset, read_bytes_temp_iter = Read(section_byte[6], offset, op_code[3][0]) instruction += repr(temp) + ' ' operands.append(repr(temp)) read_bytes_temp += read_bytes_temp_iter for target_table in range(0, temp): temp, offset, read_bytes_temp_iter = Read(section_byte[6], offset, op_code[3][1]) read_bytes_temp += read_bytes_temp_iter instruction += repr(temp) + ' ' operands.append(repr(temp)) temp, offset, read_bytes_temp_iter = Read(section_byte[6], offset, op_code[3][2]) instruction += repr(temp) + ' ' operands.append(repr(temp)) read_bytes_temp += read_bytes_temp_iter elif op_code[2]: if isinstance(op_code[3], tuple): for i in range(0, len(op_code [3])): temp, offset, read_bytes_temp_iter = Read(section_byte[6], offset, op_code[3][i]) read_bytes_temp += read_bytes_temp_iter instruction += repr(temp) + ' ' operands.append(repr(temp)) else: temp, offset, read_bytes_temp = Read(section_byte[6], offset, op_code[3]) instruction += repr(temp) operands.append(repr(temp)) temp_wasm_ins.opcode = op_code[0] temp_wasm_ins.opcodeint = int(byte, 16) #temp_wasm_ins.operands = instruction temp_wasm_ins.operands = operands instruction = str() operands = [] break read_bytes += read_bytes_temp return offset, matched, read_bytes, temp_wasm_ins # parses the code section. 
returns a Code_Section class def ReadCodeSection(self): offset = 0 CS = Code_Section() temp_func_bodies = Func_Body() temp_local_entry = Local_Entry() section_exists = False for whatever in self.parsedstruct.section_list: # 10 is the code section if whatever[0] == 10: code_section = whatever.copy() section_exists = True if not section_exists: return None fn_cn, offset, dummy = Read(code_section[6], offset, 'varuint32') function_cnt = fn_cn CS.count = function_cnt while function_cnt > 0: function_body_length, offset, dummy = Read(code_section[6], offset, 'varuint32') temp_func_bodies.body_size = function_body_length local_count, offset, dummy = Read(code_section[6], offset, 'varuint32') temp_func_bodies.local_count = local_count # local_count_size will eventually hold how many bytes we will read # in total because of the local section local_count_size = dummy if local_count != 0: for i in range(0, local_count): partial_local_count, offset, dummy = Read(code_section[6], offset, 'varuint32') local_count_size += dummy partial_local_type, offset, dummy = Read(code_section[6], offset, 'uint8') local_count_size += dummy temp_local_entry.count = partial_local_count temp_local_entry.type = partial_local_type temp_func_bodies.locals.append(deepcopy(temp_local_entry)) local_count -= partial_local_count else: pass read_bytes_so_far = local_count_size for i in range(0, function_body_length - local_count_size): offset, matched, read_bytes, temp_wasm_ins = self.Disassemble(code_section, offset) temp_func_bodies.code.append(deepcopy(temp_wasm_ins)) if not matched: print(Colors.red + 'did not match anything' + Colors.ENDC) print(Colors.red + 'code section offset: ' + repr(offset) + Colors.ENDC) print(Colors.red + 'read bytes: ' + repr(read_bytes) + Colors.ENDC) print(Colors.red + 'wasm ins: ' + repr(temp_wasm_ins.opcode) + Colors.ENDC) for iter in temp_func_bodies.code: print(iter.opcode) print(iter.operands) sys.exit(1) else: pass matched = False read_bytes_so_far += read_bytes if 
read_bytes_so_far == function_body_length: break CS.func_bodies.append(deepcopy(temp_func_bodies)) temp_func_bodies.locals = [] temp_func_bodies.code = [] function_cnt -= 1 return(CS) # parsed the data section. returns a Data_Section class def ReadDataSection(self): loop = True section_exists = False offset = 0 DS = Data_Section() temp_data_segment = Data_Segment() init_expr = [] for whatever in self.parsedstruct.section_list: if whatever[0] == 11: data_section = whatever.copy() section_exists = True if not section_exists: return None data_entry_count, offset, dummy = Read(data_section[6], offset, 'varuint32') DS.count = data_entry_count while data_entry_count != 0: linear_memory_index, offset, dummy = Read(data_section[6], offset, 'varuint32') temp_data_segment.index = linear_memory_index # reading in the init-expr while loop: # @DEVI-FIXME-this only works for none extended opcodes if data_section[6][offset] == 0x0b: loop = False data_char, offset, dummy = Read(data_section[6], offset, 'uint8') init_expr.append(data_char) temp_data_segment.offset = init_expr data_entry_length, offset, dummy = Read(data_section[6], offset, 'varuint32') temp_data_segment.size = data_entry_length data_itself = data_section[6][offset:offset + data_entry_length] temp_data_segment.data = data_itself offset += data_entry_length DS.data_segments.append(deepcopy(temp_data_segment)) data_entry_count -= 1 init_expr = [] loop = True return(DS) # parses the import section. 
returns an Import_Section class def ReadImportSection(self): offset = 0 section_exists = False module_name = [] field_name = [] IS = Import_Section() temp_import_entry = Import_Entry() for whatever in self.parsedstruct.section_list: if whatever[0] == 2: import_section = whatever.copy() section_exists = True if not section_exists: return None import_cnt, offset, dummy = Read(import_section[6], offset, 'varuint32') IS.count = import_cnt while import_cnt != 0: module_length, offset, dummy = Read(import_section[6], offset, 'varuint32') temp_import_entry.module_len = module_length for i in range(0, module_length): module_name.append(import_section[6][offset + i]) temp_import_entry.module_str = module_name offset += module_length field_length, offset, dummy = Read(import_section[6], offset, 'varuint32') temp_import_entry.field_len = field_length for i in range(0, field_length): field_name.append(import_section[6][offset + i]) temp_import_entry.field_str = field_name offset += field_length kind, offset, dummy = Read(import_section[6], offset, 'uint8') temp_import_entry.kind = kind # function type if kind == 0: import_type, offset, dummy = Read(import_section[6], offset, 'varuint32') temp_import_entry.type = import_type # table type elif kind == 1: table_type = Table_Type() table_type.elemet_type, offset, dummy = Read(import_section[6], offset, 'varint7') rsz_limits = Resizable_Limits() rsz_limits.flags, offset, dummy = Read(import_section[6], offset, 'varuint1') rsz_limits.initial, offset, dummy = Read(import_section[6], offset, 'varuint32') if rsz_limits.flags: rsz_limits.maximum, offset, dummy = Read(import_section[6], offset, 'varuint32') table_type.limit = rsz_limits temp_import_entry.type = table_type elif kind == 2: memory_type = Memory_Type() rsz_limits = Resizable_Limits() rsz_limits.flags, offset, dummy = Read(import_section[6], offset, 'varuint1') rsz_limits.initial, offset, dummy = Read(import_section[6], offset, 'varuint32') if rsz_limits.flags: 
rsz_limits.maximum, offset, dummy = Read(import_section[6], offset, 'varuint32') memory_type.limits = rsz_limits temp_import_entry.type = memory_type elif kind == 3: global_type = Global_Type() global_type.content_type, offset, dummy = Read(import_section[6], offset, 'uint8') global_type.mutability, offset, dummy = Read(import_section[6], offset, 'varuint1') temp_import_entry.type = global_type IS.import_entry.append(deepcopy(temp_import_entry)) import_cnt -= 1 module_name = [] field_name = [] return(IS) # parses the export section, returns an Export_Section class def ReadExportSection(self): offset = 0 section_exists = False field_name = [] ES = Export_Section() temp_export_entry = Export_Entry() for whatever in self.parsedstruct.section_list: if whatever[0] == 7: export_section = whatever.copy() section_exists = True if not section_exists: return None export_entry_cnt, offset, dummy = Read(export_section[6], offset, 'varuint32') ES.count = export_entry_cnt while export_entry_cnt != 0: field_length,
data monitor (true | false)", display_name="Data Monitor", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.LOG_DISPLAY_TIME, r'Monitor\s+(?:\w+\s+){1}(\w+)', lambda match: True if match.group(1) == ON else False, lambda x: YES if x else NO, visibility=ParameterDictVisibility.IMMUTABLE, regex_flags=re.DOTALL, default_value=True, description="Enable log display time while monitoring (true | false)", display_name="Log Display Time", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.LOG_DISPLAY_FRACTIONAL_SECOND, r'Monitor\s+(?:\w+\s+){2}(\w+)', lambda match: True if match.group(1) == ON else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.IMMUTABLE, default_value=True, description="Enable log/display time with fractional seconds (true | false)", display_name="Display Fractional Seconds", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.LOG_DISPLAY_ACOUSTIC_AXIS_VELOCITIES, r'Monitor\s+(?:\w+\s+){3}(\w+)', lambda match: False if match.group(1) == OFF else True, lambda x: YES if x else NO, regex_flags=re.DOTALL, startup_param=True, direct_access=True, default_value=True, description="Enable log/display format acoustic axis velocities (true | false)", display_name="Acoustic Axis Velocities", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.LOG_DISPLAY_ACOUSTIC_AXIS_VELOCITIES_FORMAT, r'Monitor\s+(?:\w+\s+){3}(\w+)', lambda match: match.group(1), str, regex_flags=re.DOTALL, startup_param=True, direct_access=True, default_value='H', description="Format: (H:Hexadecimal | D:Decimal | S:SI units cm/s)", display_name="Format of Acoustic Axis Velocities", type=ParameterDictType.STRING)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.QUERY_MODE, r'.*Q\| Query Mode\s+(\w+).*', lambda match: True if match.group(1) == 
ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=False, startup_param=True, menu_path_read=SubMenues.DEPLOY, submenu_read=None, menu_path_write=SubMenues.DEPLOY, submenu_write=InstrumentCmds.SET_QUERY, description="Enable query mode (true | false)", display_name="Query Mode", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.FREQUENCY, r'4\| Measurement Frequency\s+(%(float)s)\s+\[Hz\]' % common_matches, lambda match: float(match.group(1)), self._float_to_string, regex_flags=re.DOTALL, default_value=1.0, menu_path_read=SubMenues.DEPLOY, submenu_read=None, menu_path_write=SubMenues.DEPLOY, submenu_write=InstrumentCmds.SET_FREQUENCY, description="Measurement rate: (0.01 - 50.0)", display_name="Measurement Frequency", type=ParameterDictType.FLOAT, units=Units.HERTZ)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.MEASUREMENTS_PER_SAMPLE, r'5\| Measurements/Sample\s+(%(int)s)\s+\[M/S\]' % common_matches, lambda match: int(match.group(1)), self._int_to_string, regex_flags=re.DOTALL, default_value=1, menu_path_read=SubMenues.DEPLOY, submenu_read=None, menu_path_write=SubMenues.DEPLOY, submenu_write=InstrumentCmds.SET_MEAS_PER_SAMPLE, description="Number of measurements averaged: (1 - 10000)", display_name="Measurements per Sample", type=ParameterDictType.INT)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.SAMPLE_PERIOD, '6\| Sample Period\s+(%(float)s)' % common_matches, lambda match: float(match.group(1)), self._float_to_string, regex_flags=re.DOTALL, menu_path_read=SubMenues.DEPLOY, submenu_read=None, menu_path_write=SubMenues.DEPLOY, submenu_write=InstrumentCmds.SET_SAMPLE_PERIOD, description="Interval between samples: (0.02 - 10000)", display_name="Sample Period", type=ParameterDictType.FLOAT, units=Units.SECOND)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.SAMPLES_PER_BURST, r'7\| Samples/Burst\s+(%(int)s)\s+\[S/B\]' 
% common_matches, lambda match: int(match.group(1)), self._int_to_string, regex_flags=re.DOTALL, menu_path_read=SubMenues.DEPLOY, submenu_read=None, menu_path_write=SubMenues.DEPLOY, submenu_write=InstrumentCmds.SET_SAMPLES_PER_BURST, description="Number of samples in a burst: (1 to 100000)", display_name="Samples per Burst", type=ParameterDictType.INT)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.BURST_INTERVAL_DAYS, r'8\| Burst Interval\s+(%(int)s)\s+(%(int)s):(%(int)s):(%(int)s)' % common_matches, lambda match: int(match.group(1)), self._int_to_string, regex_flags=re.DOTALL, default_value=0, menu_path_read=SubMenues.DEPLOY, submenu_read=None, menu_path_write=SubMenues.DEPLOY, submenu_write=InstrumentCmds.SET_BURST_INTERVAL_DAYS, description="Day interval between bursts: (0=continuous sampling, 1 - 366)", display_name="Burst Interval Days", type=ParameterDictType.INT, units=Units.DAY)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.BURST_INTERVAL_HOURS, r'8\| Burst Interval\s+(%(int)s)\s+(%(int)s):(%(int)s):(%(int)s)' % common_matches, lambda match: int(match.group(2)), self._int_to_string, regex_flags=re.DOTALL, default_value=0, description="Hour interval between bursts: (0=continuous sampling, 1 - 23)", display_name="Burst Interval Hours", type=ParameterDictType.INT, units=Units.HOUR)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.BURST_INTERVAL_MINUTES, r'8\| Burst Interval\s+(%(int)s)\s+(%(int)s):(%(int)s):(%(int)s)' % common_matches, lambda match: int(match.group(3)), self._int_to_string, regex_flags=re.DOTALL, default_value=0, description="Minute interval between bursts: (0=continuous sampling, 1 - 59)", display_name="Burst Interval Minutes", type=ParameterDictType.INT, units=Units.MINUTE)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.BURST_INTERVAL_SECONDS, r'8\| Burst Interval\s+(%(int)s)\s+(%(int)s):(%(int)s):(%(int)s)' % common_matches, lambda match: 
int(match.group(4)), self._int_to_string, regex_flags=re.DOTALL, default_value=0, description="Seconds interval between bursts: (0=continuous sampling, 1 - 59)", display_name="Burst Interval Seconds", type=ParameterDictType.INT, units=Units.SECOND)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.SI_CONVERSION, r'<C> Binary to SI Conversion\s+(%(float)s)' % common_matches, lambda match: float(match.group(1)), self._float_to_string, regex_flags=re.DOTALL, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_SI_CONVERSION, description="Coefficient to use during conversion from binary to SI: (0.0010000 - 0.0200000)", display_name="SI Conversion Coefficient", type=ParameterDictType.FLOAT)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.WARM_UP_INTERVAL, r'.*<W> Warm up interval\s+(\w)\w*\s+.*', lambda match: match.group(1), lambda string: str(string), regex_flags=re.DOTALL, startup_param=True, default_value='F', visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_WARM_UP_INTERVAL, description="Adjusts warm up time to allow for working with auxiliary sensors " "that have slower response times to get the required accuracy: (F:Fast | S:Slow)", display_name="Warm Up Interval for Sensors", type=ParameterDictType.ENUM)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.THREE_AXIS_COMPASS, r'.*<1> 3-Axis Compass\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=True, startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_THREE_AXIS_COMPASS, description="Enable 3-axis compass sensor 
(true | false)", display_name="3-axis Compass Sensor", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.SOLID_STATE_TILT, r'.*<2> Solid State Tilt\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=True, startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_SOLID_STATE_TILT, description="Enable the solid state tilt sensor: (true | false)", display_name="Solid State Tilt Sensor", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.THERMISTOR, r'.*<3> Thermistor\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda string: bool_to_on_off(string), regex_flags=re.DOTALL, default_value=True, startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_THERMISTOR, description="Enable the thermistor sensor (true | false)", display_name="Thermistor Sensor", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.PRESSURE, r'.*<4> Pressure\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=False, # this parameter can only be set to NO (meaning disabled) # support for setting it to YES has not been implemented startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_PRESSURE, description="Enable the pressure sensor (true | false)", display_name="Pressure Sensor", type=ParameterDictType.BOOL)) 
self._param_dict.add_parameter( RegexParameter(InstrumentParameters.AUXILIARY_1, r'.*<5> Auxiliary 1\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=False, # this parameter can only be set to NO (meaning disabled) # support for setting it to YES has not been implemented startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_AUXILIARY, description="Enable auxiliary sensor 1 (true | false)", display_name="Auxiliary sensor 1", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.AUXILIARY_2, r'.*<6> Auxiliary 2\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=False, # this parameter can only be set to NO (meaning disabled) # support for setting it to YES has not been implemented startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_AUXILIARY, description="Enable auxiliary sensor 2 (true | false)", display_name="Auxiliary sensor 2", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.AUXILIARY_3, r'.*<7> Auxiliary 3\s+(\w+)\s+.*', lambda match: True if match.group(1) == ENABLED else False, lambda x: YES if x else NO, regex_flags=re.DOTALL, default_value=False, # this parameter can only be set to NO (meaning disabled) # support for setting it to YES has not been implemented startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_AUXILIARY, description="Enable auxiliary sensor 3 
(true | false)", display_name="Auxiliary sensor 3", type=ParameterDictType.BOOL)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.SENSOR_ORIENTATION, r'.*<O> Sensor Orientation\s+(.*)\n.*', lambda match: self._parse_sensor_orientation(match.group(1)), lambda string: str(string), regex_flags=re.DOTALL, default_value='2', startup_param=True, visibility=ParameterDictVisibility.IMMUTABLE, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=SubMenues.CONFIGURATION, submenu_write=InstrumentCmds.SET_SENSOR_ORIENTATION, display_name="Sensor Orientation", type=ParameterDictType.ENUM, value_description="Orientation: (1:Vertical/Down | 2:Vertical/Up | 3:Horizontal/Straight | " "4:Horizontal/Bent Left | 5:Horizontal/Bent Right | 6:Horizontal/Bent Down" " | 7:Horizontal/Bent Up)")) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.SERIAL_NUMBER, r'.*<S> Serial Number\s+(\w+)\s+.*', lambda match: match.group(1), lambda string: str(string), regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CONFIGURATION, submenu_read=None, menu_path_write=None, submenu_write=None, description="The instrument serial number", display_name="Serial Number", type=ParameterDictType.INT)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.VELOCITY_OFFSET_PATH_A, r'.*Current path offsets:\s+(\w+)\s+.*', lambda match: int(match.group(1), 16), lambda num: '{:04x}'.format(num), regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.VELOCITY_OFFSETS, menu_path_write=None, submenu_write=None, description="The velocity offset value for path A: (3328 - 62208)", display_name="Velocity Offset Path A", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.VELOCITY_OFFSET_PATH_B, r'.*Current path offsets:\s+\w+\s+(\w+)\s+.*', lambda match: 
int(match.group(1), 16), lambda num: '{:04x}'.format(num), regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.VELOCITY_OFFSETS, menu_path_write=None, submenu_write=None, description="The velocity offset value for path B: (3328 - 62208)", display_name="Velocity Offset Path B", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.VELOCITY_OFFSET_PATH_C, r'.*Current path offsets:\s+\w+\s+\w+\s+(\w+)\s+.*', lambda match: int(match.group(1), 16), lambda num: '{:04x}'.format(num), regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.VELOCITY_OFFSETS, menu_path_write=None, submenu_write=None, description="The velocity offset value for path C: (3328 - 62208)", display_name="Velocity Offset Path C", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.VELOCITY_OFFSET_PATH_D, r'.*Current path offsets:\s+\w+\s+\w+\s+\w+\s+(\w+)\s+.*', lambda match: int(match.group(1), 16), lambda num: '{:04x}'.format(num), regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.VELOCITY_OFFSETS, menu_path_write=None, submenu_write=None, description="The velocity offset value for path D: (3328 - 62208)", display_name="Velocity Offset Path D", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.COMPASS_OFFSET_0, r'Current compass offsets:\s+(%(float)s)\s+(%(float)s)\s+(%(float)s)' % common_matches, lambda match: int(match.group(1)), self._int_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.COMPASS_OFFSETS, menu_path_write=None, submenu_write=None, display_name="Compass 
Offset 0", description="The offset value for compass 0: (-400 - 400)", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.COMPASS_OFFSET_1, r'Current compass offsets:\s+(%(float)s)\s+(%(float)s)\s+(%(float)s)' % common_matches, lambda match: int(match.group(2)), self._int_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.COMPASS_OFFSETS, menu_path_write=None, submenu_write=None, display_name="Compass Offset 1", description="The offset value for compass 1: (-400 - 400)", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.COMPASS_OFFSET_2, r'Current compass offsets:\s+(%(float)s)\s+(%(float)s)\s+(%(float)s)' % common_matches, lambda match: int(match.group(3)), self._int_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.COMPASS_OFFSETS, menu_path_write=None, submenu_write=None, display_name="Compass Offset 2", description="The offset value for compass 2: (-400 - 400)", type=ParameterDictType.INT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.COMPASS_SCALE_FACTORS_0, r'Current compass scale factors:\s+(%(float)s)\s+(%(float)s)\s+(%(float)s)' % common_matches, lambda match: float(match.group(1)), self._float_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.COMPASS_SCALE_FACTORS, menu_path_write=None, submenu_write=None, display_name="Compass Scale Factor 0", description="The scale factor for compass 0: (0.200 - 5.000)", type=ParameterDictType.FLOAT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.COMPASS_SCALE_FACTORS_1, r'Current compass scale 
factors:\s+(%(float)s)\s+(%(float)s)\s+(%(float)s)\s+' % common_matches, lambda match: float(match.group(2)), self._float_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.COMPASS_SCALE_FACTORS, menu_path_write=None, submenu_write=None, display_name="Compass Scale Factor 1", description="The scale factor for compass 1: (0.200 - 5.000)", type=ParameterDictType.FLOAT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.COMPASS_SCALE_FACTORS_2, r'Current compass scale factors:\s+(%(float)s)\s+(%(float)s)\s+(%(float)s)\s+' % common_matches, lambda match: float(match.group(3)), self._float_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.COMPASS_SCALE_FACTORS, menu_path_write=None, submenu_write=None, display_name="Compass Scale Factor 2", description="The scale factor for compass 2: (0.200 - 5.000)", type=ParameterDictType.FLOAT, units=Units.COUNTS)) self._param_dict.add_parameter( RegexParameter(InstrumentParameters.TILT_PITCH_OFFSET, r'Current tilt offsets:\s+(%(int)s)\s+(%(int)s)\s+' % common_matches, lambda match: int(match.group(1)), self._int_to_string, regex_flags=re.DOTALL, visibility=ParameterDictVisibility.READ_ONLY, value=-1, # to indicate that the parameter has not been read from the instrument menu_path_read=SubMenues.CALIBRATION, submenu_read=InstrumentCmds.TILT_OFFSETS, menu_path_write=None, submenu_write=None, description="Tilt offset for pitch axis: (0 to 30000)", display_name="Tilt Offset (Pitch)",
users_graph['valueAxes'][0]['title'] = 'Points' users_graph['graphs'] = users_graphs users_graph['dataProvider'] = sorted(users_data_provider, key=lambda x: x['date']) # compute the cart of challenge solvers over time for each challenge challenges_graphs = [] challenges_data_provider = [] for i, chal in enumerate(chals.values()): challenges_graphs.append({ "id": "mygraph-{}".format(i), "title": chal['name'], "valueField": "column-{}".format(i), "type": "line", "lineThickness": 3, "balloonText": "[[title]] solved by [[value]]"}) for i, chal in enumerate(challenges_graph_dict.values()): chal_aux = chal chal_aux.append([date_now.timestamp(), 0]) chal_aux = sorted(chal_aux, key=lambda x: x[0]) for j in range(1, len(chal_aux)): chal_aux[j][1] += chal_aux[j - 1][1] # finally add the newly created list to the data_provider list for ts, solvers in chal_aux: challenges_data_provider.append({ "date": datetime.datetime.fromtimestamp(int(ts)).strftime('%Y-%m-%d %H:%M:%S'), "column-{}".format(i): solvers }) challenges_graph = deepcopy(graph_template) challenges_graph['titles'][0]['text'] = 'Challenges' challenges_graph['valueAxes'][0]['title'] = 'Solvers' challenges_graph['graphs'] = challenges_graphs challenges_graph['dataProvider'] = challenges_data_provider return {'challenges': chals, 'scoreboard': scoreboard, 'users_graph': users_graph, 'challenges_graph': challenges_graph } @app.route('/challenge/<name>', methods=['GET', 'POST']) @jeopardy_mode_required @login_required def challenge(name): """Display information about a challenge plus the flag submission form and the writeup.""" db_conn = db_connect() cur = db_conn.cursor() # get challenge data if the challenge exists cur.execute('SELECT * FROM challenges WHERE name = %s', [name]) challenge = cur.fetchone() # if the challenge is not valid abort if challenge is None: cur.close() abort(404) # check if the current user already solved the challenge cur.execute(('SELECT * FROM challenge_attacks ' 'WHERE user_id = %s AND 
challenge_id = %s'), [current_user.id, challenge['id']]) solved = cur.fetchone() is not None # get the list of all the writeups submitted by this user for this challenge cur.execute(('SELECT id, timestamp FROM writeups ' 'WHERE user_id = %s AND challenge_id = %s ' 'ORDER BY id DESC'), [current_user.id, challenge['id']]) writeups = cur.fetchall() # get the evaluation for this challenge evaluation = None if writeups: cur.execute(('SELECT feedback, grade, timestamp FROM challenges_evaluations ' 'WHERE user_id = %s AND challenge_id = %s '), [current_user.id, challenge['id']]) evaluation = cur.fetchone() graded = evaluation is not None and evaluation['grade'] is not None # retrieve the writeup form, if any writeup_form = ctforge.forms.ChallengeWriteupForm(writeup=challenge['writeup_template']) # retrive the flag form flag_form = ctforge.forms.ChallengeFlagForm() # accept POST requests only if the challenge is active if request.method == 'POST' and challenge['active']: # process the two mutually exclusive forms writeup_data = request.form.get('writeup') flag = request.form.get('flag') if writeup_data is not None: # only allow writeup submission if writeup support is enabled for this chal if challenge['writeup'] and writeup_form.validate_on_submit(): if graded: # writeup already submitted, resubmission allowed only if there's no grade flash('Your submission has already been graded, you cannot modify it', 'error') else: writeup_data = writeup_form.writeup.data try: # save this writeup into the db cur.execute(('INSERT INTO writeups (user_id, challenge_id, writeup) ' 'VALUES (%s, %s, %s) RETURNING id'), [current_user.id, challenge['id'], writeup_data]) writeup_id = cur.fetchone()['id'] cur.close() db_conn.commit() flash('Writeup added', 'success') except psycopg2.Error as e: db_conn.rollback() error_msg = 'Unknown database error: {}'.format(e) flash(error_msg, 'error') app.logger.error(error_msg) else: flash_errors(writeup_form) else: if cur is None: cur = db_conn.cursor() 
if flag is not None and flag_form.validate_on_submit(): flag = flag_form.flag.data if flag == challenge['flag']: try: # save this attack into the db cur.execute(( 'INSERT INTO challenge_attacks (user_id, challenge_id) ' 'VALUES (%s, %s)'), [current_user.id, challenge['id']]) cur.close() db_conn.commit() flash('Flag accepted!', 'success') except psycopg2.IntegrityError: # this exception is raised not only on duplicated entry, # but also when key constraint fails db_conn.rollback() flash('You already solved this challenge') except psycopg2.Error as e: db_conn.rollback() error_msg = 'Unknown database error: {}'.format(e) flash(error_msg, 'error') app.logger.error(error_msg) else: flash('Invalid flag', 'error') else: flash_errors(flag_form) # close the pending connection to the database db_conn.close() return redirect(url_for('challenge', name=challenge['name'])) db_conn.close() return render_template('challenge.html', flag_form=flag_form, writeup_form=writeup_form, challenge=challenge, evaluation=evaluation, solved=solved, graded=graded, writeups=writeups) @app.route('/writeup/<int:id>') @app.route('/writeup/<int:id>/<int:md>') @jeopardy_mode_required @login_required def writeup(id, md=0): """Display the provided writeup.""" db_conn = get_db_connection() with db_conn.cursor() as cur: # get the writeup data if it exists cur.execute(( 'SELECT W.id AS id, W.writeup AS writeup, W.timestamp AS timestamp, ' ' U.id AS user_id, U.name AS user_name, U.surname AS user_surname, ' ' C.id AS challenge_id, C.name AS challenge_name, C.points AS challenge_points ' 'FROM writeups AS W ' 'JOIN users AS U ON W.user_id = U.id ' 'JOIN challenges AS C ON W.challenge_id = C.id ' 'WHERE W.id = %s'), [id]) writeup = cur.fetchone() # grant access to the author or admin if writeup is not None and (writeup['user_id'] == current_user.id or current_user.admin): with db_conn.cursor() as cur: cur.execute(( 'SELECT id, timestamp FROM writeups ' 'WHERE user_id = %s AND challenge_id = %s' 'ORDER BY 
timestamp DESC'), [writeup['user_id'], writeup['challenge_id']]) writeups = cur.fetchall() return render_template('writeup.html', writeup=writeup, writeups=writeups, md=md) abort(404) @app.route('/service/<name>') @attackdefense_mode_required @login_required def service(name): """Display information about a service.""" db_conn = get_db_connection() with db_conn.cursor() as cur: # get service data if the service exists cur.execute('SELECT * FROM services WHERE name = %s', [name]) service = cur.fetchone() if service is None: abort(404) return render_template('service.html', service=service) @app.route('/teams') @attackdefense_mode_required def teams(): """Print teams data.""" db_conn = get_db_connection() with db_conn.cursor() as cur: # get teams cur.execute('SELECT id, name, ip FROM teams ORDER BY id') teams = cur.fetchall() # get users cur.execute(('SELECT id, team_id, name, surname ' 'FROM users ORDER BY id')) users = cur.fetchall() return render_template('teams.html', teams=teams, users=users) @app.route('/scoreboard') @attackdefense_mode_required def scoreboard(): # get the latest round db_conn = get_db_connection() with db_conn.cursor() as cur: cur.execute('SELECT id AS rnd, timestamp FROM rounds ORDER BY id DESC LIMIT 1') res = cur.fetchone() rnd = res['rnd']-1 if res is not None and res['rnd'] else 0 # get the time left until the next round date_now = datetime.datetime.now() seconds_left = app.config['ROUND_DURATION'] if rnd >= 1: # get seconds left till new round seconds_left = max(((res['timestamp'] + datetime.timedelta(seconds=app.config['ROUND_DURATION'])) - date_now).seconds, 0) # get all the other stuff out of the cached function scoreboard_data = _scoreboard(rnd) return render_template('scoreboard.html', rnd=rnd, time_left=seconds_left, **scoreboard_data) <EMAIL>(timeout=60) def _scoreboard(rnd, mode = 'default'): db_conn = get_db_connection() cur = db_conn.cursor() # retrieve the service table cur.execute('SELECT id, name, active FROM services') 
services = cur.fetchall() # retrieve the latest score of each team along with the team names' cur.execute(( 'SELECT T.id, T.name, T.ip, S.attack, S.defense ' 'FROM scores as S JOIN teams as T ON S.team_id = T.id ' 'WHERE round = %s'), [rnd]) results = cur.fetchall() # start populating the board, it's a dictionary of dictionaries, see # the initialization below to grasp the structure board = {} for r in results: board[r['id']] = { 'team': r['name'], 'ip': r['ip'], 'id': r['id'], 'attack': r['attack'], 'defense': r['defense'], 'ratio_attack': 0, 'ratio_defense': 0, 'position': 0, 'services': {}, 'attack_scores': [], 'defense_scores': [], 'total_scores': [] } # get services status cur.execute(( 'SELECT F.team_id, F.service_id, C.successful, MAX(C.timestamp) AS timestamp ' 'FROM active_flags AS F ' 'LEFT JOIN integrity_checks AS C ON ' ' (F.flag = C.flag AND C.timestamp = (SELECT MAX(timestamp) ' ' FROM integrity_checks ' ' WHERE flag = F.flag)) ' 'GROUP BY F.team_id, F.service_id, C.successful')); services_status = cur.fetchall() for ss in services_status: board[ss['team_id']]['services'][ss['service_id']] = (ss['successful'], ss['timestamp']) # set default values for team_id in board: for service in services: try: _ = board[team_id]['services'][service['id']] except KeyError: board[team_id]['services'][service['id']] = (2, '???') # normalize scores avoiding divisions by 0. If the score table is empty # (it shouldn't, we can initialize it with 0s) assume the max scores to # be 0. 
The scoreboard will anyway result empty since the teams are # extracted from the score table if len(board): max_attack = max(max(team['attack'] for team in board.values()), 1) max_defense = max(max(team['defense'] for team in board.values()), 1) else: max_attack = max_defense = 0 # get the scores of all the teams during the whole game to create some # nice graphs cur.execute('SELECT * FROM scores ORDER BY round') scores = cur.fetchall() cur.close() for s in scores: board[s['team_id']]['attack_scores'].append([int(s['round']), int(s['attack'])]) board[s['team_id']]['defense_scores'].append([int(s['round']), int(s['defense'])]) board[s['team_id']]['total_scores'].append([int(s['round']), int(0.6 * s['attack'] + 0.4 * s['defense'])]) # The team scores. Should include the service checks here too? If corrupted or not? # Could just add x points for the service being up? Or, these could be put in as a percentage too? for team in board.values(): team['ratio_attack'] = team['attack'] * 100 / max_attack team['ratio_defense'] = team['defense'] * 100 / max_defense team['score'] = 0.5 * team['ratio_attack'] + 0.4 * team['ratio_defense'] # sort the board in descending order with respect to the score: the # sorted structure is a list of board values, we just
ID, MTS_CREATE, MTS_UPDATE ], ... ] Examples -------- :: positions = bfx_client.positions_history(limit=10) for position in positions: print(position) """ body = kwargs raw_body = json.dumps(body) path = "v2/auth/r/positions/hist" response = self._post(path, raw_body, verify=True) return response def positions_audit(self, **kwargs): """`Bitfinex positions audit reference <https://docs.bitfinex.com/reference#rest-auth-positions-audit>`_ Return and audit of the positions of a user that correspond to the ids send Parameters ---------- id : Optional list of ints List of position IDs to audit start : Optional int Millisecond start time end : Optional int Millisecond end time limit : Optional int Number of records Returns ------- list :: [ [ SYMBOL, STATUS, AMOUNT, BASE_PRICE, MARGIN_FUNDING, MARGIN_FUNDING_TYPE, PL, PL_PERC, PRICE_LIQ, LEVERAGE, ID, MTS_CREATE, MTS_UPDATE, TYPE, COLLATERAL, COLLATERAL_MIN, META ], ... ] Examples -------- :: positions = bfx_client.positions_audit([1, 2, 3]) for position in positions: print(position) """ body = kwargs raw_body = json.dumps(body) path = "v2/auth/r/positions/audit" response = self._post(path, raw_body, verify=True) return response def derivative_position_collateral(self): raise NotImplementedError def funding_offers(self, symbol=""): """`Bitfinex funding offers reference <https://docs.bitfinex.com/reference#rest-auth-funding-offers>`_ Get active funding offers. Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. Returns ------- list :: [ [ ID, SYMBOL, MTS_CREATED, MTS_UPDATED, AMOUNT, AMOUNT_ORIG, TYPE, _PLACEHOLDER, _PLACEHOLDER, FLAGS, STATUS, _PLACEHOLDER, _PLACEHOLDER, _PLACEHOLDER, RATE, PERIOD, NOTIFY, HIDDEN, _PLACEHOLDER, RENEW, ... ], ... 
] Examples -------- :: bfx_client.funding_offers() bfx_client.funding_offers("fIOT") """ body = {} raw_body = json.dumps(body) path = "v2/auth/r/funding/offers/{}".format(symbol) response = self._post(path, raw_body, verify=True) return response def submit_funding_offer(self): raise NotImplementedError def cancel_funding_offer(self): raise NotImplementedError def cancel_all_funding_offers(self): raise NotImplementedError def funding_close(self): raise NotImplementedError def funding_auto_renew(self): raise NotImplementedError def keep_funding(self): raise NotImplementedError def funding_offers_history(self, symbol="", **kwargs): """`Bitfinex funding offers hist reference <https://docs.bitfinex.com/reference#rest-auth-funding-offers-hist>`_ Get past inactive funding offers. Limited to last 3 days. Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. start : Optional int Millisecond start time end : Optional int Millisecond end time limit : Optional int Number of records Returns ------- list :: [ [ ID, SYMBOL, MTS_CREATED, MTS_UPDATED, AMOUNT, AMOUNT_ORIG, TYPE, _PLACEHOLDER, _PLACEHOLDER, FLAGS, STATUS, _PLACEHOLDER, _PLACEHOLDER, _PLACEHOLDER, RATE, PERIOD, NOTIFY, HIDDEN, _PLACEHOLDER, RENEW, ... ], ... ] Examples -------- :: bfx_client.funding_offers_history() bfx_client.funding_offers_history('fOMG') """ body = kwargs raw_body = json.dumps(body) add_symbol = "{}/".format(symbol) if symbol else "" path = "v2/auth/r/funding/offers/{}hist".format(add_symbol) response = self._post(path, raw_body, verify=True) return response def funding_loans(self, symbol=""): """`Bitfinex funding loans reference <https://docs.bitfinex.com/reference#rest-auth-funding-loans>`_ Funds not used in active positions Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. 
Returns ------- list :: [ [ ID, SYMBOL, SIDE, MTS_CREATE, MTS_UPDATE, AMOUNT, FLAGS, STATUS, _PLACEHOLDER, _PLACEHOLDER, _PLACEHOLDER, RATE, PERIOD, MTS_OPENING, MTS_LAST_PAYOUT, NOTIFY, HIDDEN, _PLACEHOLDER, RENEW, _PLACEHOLDER, NO_CLOSE, ... ], ... ] Example ------- :: bfx_client.funding_loans('fOMG') """ body = {} raw_body = json.dumps(body) path = "v2/auth/r/funding/loans/{}".format(symbol) response = self._post(path, raw_body, verify=True) return response def funding_loans_history(self, symbol="", **kwargs): """`Bitfinex funding loans history reference <https://docs.bitfinex.com/reference#rest-auth-funding-loans-hist>`_ Inactive funds not used in positions. Limited to last 3 days. Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. start : Optional int Millisecond start time end : Optional int Millisecond end time limit : Optional int Number of records Returns ------- list :: [ [ ID, SYMBOL, SIDE, MTS_CREATE, MTS_UPDATE, AMOUNT, FLAGS, STATUS, _PLACEHOLDER, _PLACEHOLDER, _PLACEHOLDER, RATE, PERIOD, MTS_OPENING, MTS_LAST_PAYOUT, NOTIFY, HIDDEN, _PLACEHOLDER, RENEW, _PLACEHOLDER, NO_CLOSE, ... ], ... ] Example ------- :: bfx_client.funding_loans_history('fOMG') """ body = kwargs raw_body = json.dumps(body) add_symbol = "{}/".format(symbol) if symbol else "" path = "v2/auth/r/funding/loans/{}hist".format(add_symbol) response = self._post(path, raw_body, verify=True) return response def funding_credits(self, symbol=""): """`Bitfinex funding credits reference <https://docs.bitfinex.com/reference#rest-auth-funding-credits>`_ Funds used in active positions Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. Returns ------- list :: [ [ ID, SYMBOL, SIDE, MTS_CREATE, MTS_UPDATE, AMOUNT, FLAGS, STATUS, _PLACEHOLDER, _PLACEHOLDER, _PLACEHOLDER, RATE, PERIOD, MTS_OPENING, MTS_LAST_PAYOUT, NOTIFY, HIDDEN, _PLACEHOLDER, RENEW, _PLACEHOLDER, NO_CLOSE, ... ], ... 
] Example ------- :: bfx_client.funding_credits('fUSD') """ body = {} raw_body = json.dumps(body) path = "v2/auth/r/funding/credits/{}".format(symbol) response = self._post(path, raw_body, verify=True) return response def funding_credits_history(self, symbol="", **kwargs): """`Bitfinex funding credits history reference <https://docs.bitfinex.com/reference#rest-auth-funding-credits-hist>`_ Inactive funds used in positions. Limited to last 3 days. Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. start : Optional int Millisecond start time end : Optional int Millisecond end time limit : Optional int Number of records Returns ------- list :: [ [ ID, SYMBOL, SYMBOL, MTS_CREATE, MTS_UPDATE, AMOUNT, FLAGS, STATUS, _PLACEHOLDER, _PLACEHOLDER, _PLACEHOLDER, RATE, PERIOD, MTS_OPENING, MTS_LAST_PAYOUT, NOTIFY, HIDDEN, _PLACEHOLDER, RENEW, _PLACEHOLDER, NO_CLOSE, POSITION_PAIR, ... ], ... ] Example ------- :: bfx_client.funding_credits_history('fUSD') """ body = kwargs raw_body = json.dumps(body) add_symbol = "{}/".format(symbol) if symbol else "" path = "v2/auth/r/funding/credits/{}hist".format(add_symbol) response = self._post(path, raw_body, verify=True) return response def funding_trades(self, symbol="", **kwargs): """`Bitfinex funding trades hitory reference <https://docs.bitfinex.com/reference#rest-auth-funding-trades-hist>`_ Get funding trades Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. start : Optional int Millisecond start time end : Optional int Millisecond end time limit : Optional int Number of records Returns ------- list :: [ [ ID, CURRENCY, MTS_CREATE, OFFER_ID, AMOUNT, RATE, PERIOD, MAKER, ... ], ... 
] Example ------- :: bfx_client.funding_trades('fUSD') """ body = kwargs raw_body = json.dumps(body) add_symbol = "{}/".format(symbol) if symbol else "" path = "v2/auth/r/funding/trades/{}hist".format(add_symbol) response = self._post(path, raw_body, verify=True) return response def funding_info(self, tradepair): """`Bitfinex funding info reference <https://docs.bitfinex.com/reference#rest-auth-info-funding>`_ Get account funding info Parameters ---------- symbol : str The `symbol <restv1.html#symbols>`_ you want information about. Returns ------- list :: [ "sym", SYMBOL, [ YIELD_LOAN, YIELD_LEND, DURATION_LOAN, DURATION_LEND, ... ], ... ] Example ------- :: bfx_client.funding_info('fIOT') """ body = {} raw_body = json.dumps(body) path = "v2/auth/r/info/funding/{}".format(tradepair) response = self._post(path, raw_body, verify=True) return response def user_info(self): raise NotImplementedError def transfer_between_wallets(self, from_wallet, to_wallet, **kwargs): """`Bitfinex Transfer funds between wallets. Can also be used to convert USDT to USDT0 for derivatives trading. <https://docs.bitfinex.com/reference#rest-auth-transfer`_ Post transfer between wallets Parameters ---------- from_wallet : str Select the wallet from which to transfer (exchange, margin, funding (can also use the old labels which are exchange, trading and deposit respectively)) to_wallet: str Select the wallet from which to transfer (exchange, margin, funding (can also use the old labels which are exchange, trading and deposit respectively)) currency: str Select the currency that you would like to transfer (USD, UST, BTC, ....) 
currency_to: str (optional) Select the currency that you would like to exchange to (USTF0 === USDT for derivatives pairs) amount: str Select the amount to transfer email_dst: str (optional) Allows transfer of funds to a sub- or master-account identified by the associated email address Returns ------- list :: [ MTS, TYPE, MESSAGE_ID, null, [ MTS_UPDATE, WALLET_FROM, WALLET_TO, _PLACEHOLDER, CURRENCY, CURRENCY_TO, _PLACEHOLDER, AMOUNT ] CODE, STATUS, TEXT ] //Transfer [1568736745789,"acc_tf",null,null,[1568736745790,"margin","exchange",null,"USD",null,null,50],null, "SUCCESS","50.0 US Dollar transferred from Margin to Exchange"] //Transfer and conversion to USDT0 [1574173088379,"acc_tf",null,null,[1574173088379,"exchange","margin",null,"UST","USTF0",null,200],null, "SUCCESS","200.0 Tether USDt transfered from Exchange to Margin"] Example ------- :: bfx_client.transfer_between_wallets(from_wallet='margin', to_wallet='exchange', currency='USTF0', currency_to='UST' amount='1.31') """ body = { "from": from_wallet, "to": to_wallet, **kwargs } raw_body = json.dumps(body) path = "v2/auth/w/transfer" response = self._post(path, raw_body, verify=True) return response def deposit_address(self): raise NotImplementedError def withdrawal(self): raise NotImplementedError def movements(self, currency="", **kwargs): """`Bitfinex movements reference <https://docs.bitfinex.com/reference#rest-auth-movements>`_ View your past deposits/withdrawals. Parameters ---------- currency : str Currency (BTC, ...) start : Optional int Millisecond start time end : Optional int Millisecond end time limit : Optional int Number of records, default & max: 25 Returns ------- list :: [ [ ID, CURRENCY, CURRENCY_NAME, null, null, MTS_STARTED, MTS_UPDATED, null, null, STATUS, null, null, AMOUNT, FEES, null, null, DESTINATION_ADDRESS, null, null, null, TRANSACTION_ID, null ], ... 
] Example ------- :: bfx_client.movements() bfx_client.movements("BTC") """ body = kwargs raw_body = json.dumps(body) add_currency = "{}/".format(currency.upper()) if currency else "" path = "v2/auth/r/movements/{}hist".format(add_currency) response = self._post(path, raw_body, verify=True) return response def alert_list(self): """`Bitfinex list alerts reference <https://docs.bitfinex.com/reference#rest-auth-alerts>`_ List of active alerts Returns ------- list :: [ [ 'price:tBTCUSD:560.92', 'price', 'tBTCUSD', 560.92, 91 ], ... ] Example ------- :: bfx_client.alert_list() """ body = {'type': 'price'} raw_body = json.dumps(body) path = "v2/auth/r/alerts" response = self._post(path, raw_body, verify=True) return response def
line_num=6350, add=1) ClassGetCallerInfo1S.get_caller_info_s1bt(exp_stack=exp_stack, capsys=capsys) # call base class class method target update_stack(exp_stack=exp_stack, line_num=6355, add=0) cls.get_caller_info_c1bt(exp_stack=exp_stack, capsys=capsys) update_stack(exp_stack=exp_stack, line_num=6357, add=0) super().get_caller_info_c1bt(exp_stack=exp_stack, capsys=capsys) update_stack(exp_stack=exp_stack, line_num=6359, add=1) ClassGetCallerInfo1.get_caller_info_c1bt(exp_stack=exp_stack, capsys=capsys) update_stack(exp_stack=exp_stack, line_num=6362, add=1) ClassGetCallerInfo1S.get_caller_info_c1bt(exp_stack=exp_stack, capsys=capsys) # call module level function update_stack(exp_stack=exp_stack, line_num=6367, add=0) func_get_caller_info_2(exp_stack=exp_stack, capsys=capsys) # call method cls_get_caller_info2 = ClassGetCallerInfo2() update_stack(exp_stack=exp_stack, line_num=6372, add=1) cls_get_caller_info2.get_caller_info_m2(exp_stack=exp_stack, capsys=capsys) # call static method update_stack(exp_stack=exp_stack, line_num=6377, add=1) cls_get_caller_info2.get_caller_info_s2(exp_stack=exp_stack, capsys=capsys) # call class method update_stack(exp_stack=exp_stack, line_num=6382, add=1) ClassGetCallerInfo2.get_caller_info_c2(exp_stack=exp_stack, capsys=capsys) # call overloaded base class method update_stack(exp_stack=exp_stack, line_num=6387, add=1) cls_get_caller_info2.get_caller_info_m2bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class static method update_stack(exp_stack=exp_stack, line_num=6392, add=1) cls_get_caller_info2.get_caller_info_s2bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class class method update_stack(exp_stack=exp_stack, line_num=6397, add=1) ClassGetCallerInfo2.get_caller_info_c2bo(exp_stack=exp_stack, capsys=capsys) # call subclass method cls_get_caller_info2s = ClassGetCallerInfo2S() update_stack(exp_stack=exp_stack, line_num=6403, add=1) cls_get_caller_info2s.get_caller_info_m2s(exp_stack=exp_stack, 
capsys=capsys) # call subclass static method update_stack(exp_stack=exp_stack, line_num=6408, add=1) cls_get_caller_info2s.get_caller_info_s2s(exp_stack=exp_stack, capsys=capsys) # call subclass class method update_stack(exp_stack=exp_stack, line_num=6413, add=1) ClassGetCallerInfo2S.get_caller_info_c2s(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass method update_stack(exp_stack=exp_stack, line_num=6418, add=1) cls_get_caller_info2s.get_caller_info_m2bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass static method update_stack(exp_stack=exp_stack, line_num=6423, add=1) cls_get_caller_info2s.get_caller_info_s2bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass class method update_stack(exp_stack=exp_stack, line_num=6428, add=1) ClassGetCallerInfo2S.get_caller_info_c2bo(exp_stack=exp_stack, capsys=capsys) # call base method from subclass method update_stack(exp_stack=exp_stack, line_num=6433, add=1) cls_get_caller_info2s.get_caller_info_m2sb(exp_stack=exp_stack, capsys=capsys) # call base static method from subclass static method update_stack(exp_stack=exp_stack, line_num=6438, add=1) cls_get_caller_info2s.get_caller_info_s2sb(exp_stack=exp_stack, capsys=capsys) # call base class method from subclass class method update_stack(exp_stack=exp_stack, line_num=6443, add=1) ClassGetCallerInfo2S.get_caller_info_c2sb(exp_stack=exp_stack, capsys=capsys) exp_stack.pop() ############################################################################### # Class 2 ############################################################################### class ClassGetCallerInfo2: """Class to get caller info2.""" def __init__(self) -> None: """The initialization.""" self.var1 = 1 ########################################################################### # Class 2 Method 1 ########################################################################### def get_caller_info_m2(self, exp_stack: Deque[CallerInfo], capsys: Optional[Any]) -> None: """Get caller 
info method 2. Args: exp_stack: The expected call stack capsys: Pytest fixture that captures output """ self.var1 += 1 exp_caller_info = CallerInfo(mod_name='test_diag_msg.py', cls_name='ClassGetCallerInfo2', func_name='get_caller_info_m2', line_num=6428) exp_stack.append(exp_caller_info) update_stack(exp_stack=exp_stack, line_num=6482, add=0) for i, expected_caller_info in enumerate(list(reversed(exp_stack))): try: frame = _getframe(i) caller_info = get_caller_info(frame) finally: del frame assert caller_info == expected_caller_info # test call sequence update_stack(exp_stack=exp_stack, line_num=6489, add=0) call_seq = get_formatted_call_sequence(depth=len(exp_stack)) assert call_seq == get_exp_seq(exp_stack=exp_stack) if capsys: # if capsys, test diag_msg update_stack(exp_stack=exp_stack, line_num=6496, add=0) before_time = datetime.now() diag_msg('message 1', 1, depth=len(exp_stack)) after_time = datetime.now() diag_msg_args = TestDiagMsg.get_diag_msg_args( depth_arg=len(exp_stack), msg_arg=['message 1', 1]) verify_diag_msg(exp_stack=exp_stack, before_time=before_time, after_time=after_time, capsys=capsys, diag_msg_args=diag_msg_args) # call module level function update_stack(exp_stack=exp_stack, line_num=6511, add=0) func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys) # call method cls_get_caller_info3 = ClassGetCallerInfo3() update_stack(exp_stack=exp_stack, line_num=6516, add=1) cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack, capsys=capsys) # call static method update_stack(exp_stack=exp_stack, line_num=6521, add=1) cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack, capsys=capsys) # call class method update_stack(exp_stack=exp_stack, line_num=6526, add=1) ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack, capsys=capsys) # call overloaded base class method update_stack(exp_stack=exp_stack, line_num=6531, add=1) cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class 
static method update_stack(exp_stack=exp_stack, line_num=6536, add=1) cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class class method update_stack(exp_stack=exp_stack, line_num=6541, add=1) ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call subclass method cls_get_caller_info3s = ClassGetCallerInfo3S() update_stack(exp_stack=exp_stack, line_num=6547, add=1) cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack, capsys=capsys) # call subclass static method update_stack(exp_stack=exp_stack, line_num=6552, add=1) cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack, capsys=capsys) # call subclass class method update_stack(exp_stack=exp_stack, line_num=6557, add=1) ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass method update_stack(exp_stack=exp_stack, line_num=6562, add=1) cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass static method update_stack(exp_stack=exp_stack, line_num=6567, add=1) cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass class method update_stack(exp_stack=exp_stack, line_num=6572, add=1) ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call base method from subclass method update_stack(exp_stack=exp_stack, line_num=6577, add=1) cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack, capsys=capsys) # call base static method from subclass static method update_stack(exp_stack=exp_stack, line_num=6582, add=1) cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack, capsys=capsys) # call base class class method from subclass class method update_stack(exp_stack=exp_stack, line_num=6587, add=1) ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack, capsys=capsys) exp_stack.pop() 
########################################################################### # Class 2 Method 2 ########################################################################### @staticmethod def get_caller_info_s2(exp_stack: Deque[CallerInfo], capsys: Optional[Any]) -> None: """Get caller info static method 2. Args: exp_stack: The expected call stack capsys: Pytest fixture that captures output """ exp_caller_info = CallerInfo(mod_name='test_diag_msg.py', cls_name='ClassGetCallerInfo2', func_name='get_caller_info_s2', line_num=6559) exp_stack.append(exp_caller_info) update_stack(exp_stack=exp_stack, line_num=6614, add=0) for i, expected_caller_info in enumerate(list(reversed(exp_stack))): try: frame = _getframe(i) caller_info = get_caller_info(frame) finally: del frame assert caller_info == expected_caller_info # test call sequence update_stack(exp_stack=exp_stack, line_num=6621, add=0) call_seq = get_formatted_call_sequence(depth=len(exp_stack)) assert call_seq == get_exp_seq(exp_stack=exp_stack) if capsys: # if capsys, test diag_msg update_stack(exp_stack=exp_stack, line_num=6628, add=0) before_time = datetime.now() diag_msg('message 1', 1, depth=len(exp_stack)) after_time = datetime.now() diag_msg_args = TestDiagMsg.get_diag_msg_args( depth_arg=len(exp_stack), msg_arg=['message 1', 1]) verify_diag_msg(exp_stack=exp_stack, before_time=before_time, after_time=after_time, capsys=capsys, diag_msg_args=diag_msg_args) # call module level function update_stack(exp_stack=exp_stack, line_num=6643, add=0) func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys) # call method cls_get_caller_info3 = ClassGetCallerInfo3() update_stack(exp_stack=exp_stack, line_num=6648, add=1) cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack, capsys=capsys) # call static method update_stack(exp_stack=exp_stack, line_num=6653, add=1) cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack, capsys=capsys) # call class method update_stack(exp_stack=exp_stack, line_num=6658, add=1) 
ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack, capsys=capsys) # call overloaded base class method update_stack(exp_stack=exp_stack, line_num=6663, add=1) cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class static method update_stack(exp_stack=exp_stack, line_num=6668, add=1) cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class class method update_stack(exp_stack=exp_stack, line_num=6673, add=1) ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call subclass method cls_get_caller_info3s = ClassGetCallerInfo3S() update_stack(exp_stack=exp_stack, line_num=6679, add=1) cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack, capsys=capsys) # call subclass static method update_stack(exp_stack=exp_stack, line_num=6684, add=1) cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack, capsys=capsys) # call subclass class method update_stack(exp_stack=exp_stack, line_num=6689, add=1) ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass method update_stack(exp_stack=exp_stack, line_num=6694, add=1) cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass static method update_stack(exp_stack=exp_stack, line_num=6699, add=1) cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass class method update_stack(exp_stack=exp_stack, line_num=6704, add=1) ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call base method from subclass method update_stack(exp_stack=exp_stack, line_num=6709, add=1) cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack, capsys=capsys) # call base static method from subclass static method update_stack(exp_stack=exp_stack, line_num=6714, add=1) cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack, 
capsys=capsys) # call base class method from subclass class method update_stack(exp_stack=exp_stack, line_num=6719, add=1) ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack, capsys=capsys) exp_stack.pop() ########################################################################### # Class 2 Method 3 ########################################################################### @classmethod def get_caller_info_c2(cls, exp_stack: Deque[CallerInfo], capsys: Optional[Any]) -> None: """Get caller info class method 2. Args: exp_stack: The expected call stack capsys: Pytest fixture that captures output """ exp_caller_info = CallerInfo(mod_name='test_diag_msg.py', cls_name='ClassGetCallerInfo2', func_name='get_caller_info_c2', line_num=6690) exp_stack.append(exp_caller_info) update_stack(exp_stack=exp_stack, line_num=6746, add=0) for i, expected_caller_info in enumerate(list(reversed(exp_stack))): try: frame = _getframe(i) caller_info = get_caller_info(frame) finally: del frame assert caller_info == expected_caller_info # test call sequence update_stack(exp_stack=exp_stack, line_num=6753, add=0) call_seq = get_formatted_call_sequence(depth=len(exp_stack)) assert call_seq == get_exp_seq(exp_stack=exp_stack) if capsys: # if capsys, test diag_msg update_stack(exp_stack=exp_stack, line_num=6760, add=0) before_time = datetime.now() diag_msg('message 1', 1, depth=len(exp_stack)) after_time = datetime.now() diag_msg_args = TestDiagMsg.get_diag_msg_args( depth_arg=len(exp_stack), msg_arg=['message 1', 1]) verify_diag_msg(exp_stack=exp_stack, before_time=before_time, after_time=after_time, capsys=capsys, diag_msg_args=diag_msg_args) # call module level function update_stack(exp_stack=exp_stack, line_num=6775, add=0) func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys) # call method cls_get_caller_info3 = ClassGetCallerInfo3() update_stack(exp_stack=exp_stack, line_num=6780, add=1) cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack, capsys=capsys) # call 
static method update_stack(exp_stack=exp_stack, line_num=6785, add=1) cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack, capsys=capsys) # call class method update_stack(exp_stack=exp_stack, line_num=6790, add=1) ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack, capsys=capsys) # call overloaded base class method update_stack(exp_stack=exp_stack, line_num=6795, add=1) cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class static method update_stack(exp_stack=exp_stack, line_num=6800, add=1) cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class class method update_stack(exp_stack=exp_stack, line_num=6805, add=1) ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call subclass method cls_get_caller_info3s = ClassGetCallerInfo3S() update_stack(exp_stack=exp_stack, line_num=6811, add=1) cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack, capsys=capsys) # call subclass static method update_stack(exp_stack=exp_stack, line_num=6816, add=1) cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack, capsys=capsys) # call subclass class method update_stack(exp_stack=exp_stack, line_num=6821, add=1) ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass method update_stack(exp_stack=exp_stack, line_num=6826, add=1) cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass static method update_stack(exp_stack=exp_stack, line_num=6831, add=1) cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded subclass class method update_stack(exp_stack=exp_stack, line_num=6836, add=1) ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call base method from subclass method update_stack(exp_stack=exp_stack, line_num=6841, add=1) 
cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack, capsys=capsys) # call base static method from subclass static method update_stack(exp_stack=exp_stack, line_num=6846, add=1) cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack, capsys=capsys) # call base class method from subclass class method update_stack(exp_stack=exp_stack, line_num=6851, add=1) ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack, capsys=capsys) exp_stack.pop() ########################################################################### # Class 2 Method 4 ########################################################################### def get_caller_info_m2bo(self, exp_stack: Deque[CallerInfo], capsys: Optional[Any]) -> None: """Get caller info overloaded method 2. Args: exp_stack: The expected call stack capsys: Pytest fixture that captures output """ exp_caller_info = CallerInfo(mod_name='test_diag_msg.py', cls_name='ClassGetCallerInfo2', func_name='get_caller_info_m2bo', line_num=6821) exp_stack.append(exp_caller_info) update_stack(exp_stack=exp_stack, line_num=6878, add=0) for i, expected_caller_info in enumerate(list(reversed(exp_stack))): try: frame = _getframe(i) caller_info = get_caller_info(frame) finally: del frame assert caller_info == expected_caller_info # test call sequence update_stack(exp_stack=exp_stack, line_num=6885, add=0) call_seq = get_formatted_call_sequence(depth=len(exp_stack)) assert call_seq == get_exp_seq(exp_stack=exp_stack) if capsys: # if capsys, test diag_msg update_stack(exp_stack=exp_stack, line_num=6892, add=0) before_time = datetime.now() diag_msg('message 1', 1, depth=len(exp_stack)) after_time = datetime.now() diag_msg_args = TestDiagMsg.get_diag_msg_args( depth_arg=len(exp_stack), msg_arg=['message 1', 1]) verify_diag_msg(exp_stack=exp_stack, before_time=before_time, after_time=after_time, capsys=capsys, diag_msg_args=diag_msg_args) # call module level function update_stack(exp_stack=exp_stack, line_num=6907, add=0) 
func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys) # call method cls_get_caller_info3 = ClassGetCallerInfo3() update_stack(exp_stack=exp_stack, line_num=6912, add=1) cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack, capsys=capsys) # call static method update_stack(exp_stack=exp_stack, line_num=6917, add=1) cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack, capsys=capsys) # call class method update_stack(exp_stack=exp_stack, line_num=6922, add=1) ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack, capsys=capsys) # call overloaded base class method update_stack(exp_stack=exp_stack, line_num=6927, add=1) cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class static method update_stack(exp_stack=exp_stack, line_num=6932, add=1) cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack, capsys=capsys) # call overloaded base class class method update_stack(exp_stack=exp_stack, line_num=6937, add=1) ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack, capsys=capsys) # call
<filename>packages/pdf/src/RPA/PDF/keywords/finder.py import functools import math import re from dataclasses import dataclass try: # Python >=3.7 from re import Pattern except ImportError: # Python =3.6 from re import _pattern_type as Pattern from typing import ( Callable, Dict, List, Optional, Union, ) from RPA.PDF.keywords import ( LibraryContext, keyword, ) from RPA.PDF.keywords.model import BaseElement, TextBox class TargetObject(BaseElement): """Container for Target text boxes with coordinates.""" # Class level constants. boxid: int = -1 text: str = "" Element = Union[TextBox, TargetObject] @dataclass class Match: """Match object returned by the `Find Text` keyword. It contains the anchor point and its relative found elements in text format. """ anchor: str direction: str neighbours: List[str] class FinderKeywords(LibraryContext): """Keywords for locating elements.""" def __init__(self, ctx): super().__init__(ctx) # Text locator might lead to multiple valid found anchors. self._anchors: List[Element] = [] # The others usually have just one. 
(if multiple are found, set to it the # first one) self.anchor_element = None def _get_candidate_search_function( self, direction: str, regexp: Optional[Pattern], strict: bool ) -> Callable[[TextBox], bool]: if direction in ["left", "right"]: return functools.partial( self._is_match_on_horizontal, direction=direction, regexp=regexp, strict=strict, ) if direction in ["top", "bottom", "up", "down"]: return functools.partial( self._is_match_on_vertical, direction=direction, regexp=regexp, strict=strict, ) if direction == "box": return self._is_match_in_box raise ValueError(f"Not recognized direction search {direction!r}") def _log_element(self, elem: Element, prefix: str = ""): template = f"{prefix} box %d | bbox %s | text %r" self.logger.debug(template, elem.boxid, elem.bbox, elem.text) @keyword def find_text( self, locator: str, pagenum: Union[int, str] = 1, direction: str = "right", closest_neighbours: Optional[Union[int, str]] = 1, strict: bool = False, regexp: str = None, trim: bool = True, ) -> List[Match]: """Find the closest text elements near the set anchor(s) through `locator`. The PDF will be parsed automatically before elements can be searched. :param locator: Element to set anchor to. This can be prefixed with either `text:`, `regex:` or `coords:` to find the anchor by text or coordinates. `text` is assumed if no such prefix is specified. (text search is case insensitive) :param pagenum: Page number where search is performed on, defaults to 1 (first page). :param direction: In which direction to search for text elements. This can be any of 'top'/'up', 'bottom'/'down', 'left' or 'right'. (defaults to 'right') :param closest_neighbours: How many neighbours to return at most, sorted by the distance from the current anchor. :param strict: If element's margins should be used for matching those which are aligned to the anchor. (turned off by default) :param regexp: Expected format of the searched text value. 
By default all the candidates in range are considered valid neighbours. :param trim: Automatically trim leading/trailing whitespace from the text elements. (switched on by default) :returns: A list of `Match` objects where every match has the following attributes: `.anchor` - the matched text with the locator; `.neighbours` - a list of adjacent texts found on the specified direction **Examples** **Robot Framework** .. code-block:: robotframework PDF Invoice Parsing Open Pdf invoice.pdf ${matches} = Find Text Invoice Number Log List ${matches} .. code-block:: List has one item: Match(anchor='Invoice Number', direction='right', neighbours=['INV-3337']) **Python** .. code-block:: python from RPA.PDF import PDF pdf = PDF() def pdf_invoice_parsing(): pdf.open_pdf("invoice.pdf") matches = pdf.find_text("Invoice Number") for match in matches: print(match) pdf_invoice_parsing() .. code-block:: Match(anchor='Invoice Number', direction='right', neighbours=['INV-3337']) """ pagenum = int(pagenum) if closest_neighbours is not None: closest_neighbours = int(closest_neighbours) self.logger.info( "Searching for %s neighbour(s) to the %s of %r on page %d using regular " "expression: %s", f"closest {closest_neighbours}" if closest_neighbours is not None else "all", direction, locator, pagenum, regexp, ) self.set_anchor_to_element(locator, trim=trim, pagenum=pagenum) if not self.anchor_element: self.logger.warning("No anchor(s) set for locator: %s", locator) return [] regexp_compiled = re.compile(regexp) if regexp else None search_for_candidate = self._get_candidate_search_function( direction, regexp_compiled, strict ) candidates_dict: Dict[int, List[Element]] = {} anchors_map: Dict[int, Element] = {} for anchor in self._anchors: candidates_dict[anchor.boxid] = [] anchors_map[anchor.boxid] = anchor for candidate in self._get_textboxes_on_page(pagenum): self._log_element(candidate, prefix="Current candidate:") for anchor in self._anchors: self._log_element(anchor, prefix="Current 
anchor:") # Skip anchor element itself from matching and check if the candidate # matches the search criteria. if candidate.boxid != anchor.boxid and search_for_candidate( candidate, anchor=anchor ): candidates_dict[anchor.boxid].append(candidate) matches = [] for anchor_id, candidates in candidates_dict.items(): anchor = anchors_map[anchor_id] self._sort_candidates_by_anchor(candidates, anchor=anchor) if closest_neighbours is not None: # Keep the first N closest neighbours from the entire set of candidates. candidates[closest_neighbours:] = [] match = Match( anchor=anchor.text, direction=direction, neighbours=[candidate.text for candidate in candidates], ) matches.append(match) return matches @keyword def set_anchor_to_element( self, locator: str, trim: bool = True, pagenum: Union[int, str] = 1 ) -> bool: """Sets main anchor point in the document for further searches. This is used internally in the library and can work with multiple anchors at the same time if such are found. :param locator: Element to set anchor to. This can be prefixed with either `text:`, `regex:` or `coords:` to find the anchor by text or coordinates. `text` is assumed if no such prefix is specified. (text search is case insensitive) :param trim: Automatically trim leading/trailing whitespace from the text elements. (switched on by default) :param pagenum: Page number where search is performed on, defaults to 1 (first page). :returns: True if at least one anchor was found. **Examples** **Robot Framework** .. code-block:: robotframework Example Keyword ${success} = Set Anchor To Element Invoice Number **Python** .. 
code-block:: python from RPA.PDF import PDF pdf = PDF() def example_keyword(): success = pdf.set_anchor_to_element("Invoice Number") """ pagenum = int(pagenum) self.logger.info( "Trying to set anchor on page %d using locator: %r", pagenum, locator ) self.ctx.convert(trim=trim, pagenum=pagenum) self._anchors.clear() self.anchor_element = None pure_locator = locator criteria = "text" parts = locator.split(":", 1) if len(parts) == 2 and parts[0] in ("coords", "text", "regex"): criteria = parts[0] pure_locator = parts[1] if criteria == "coords": coords = pure_locator.split(",") if len(coords) == 2: left, bottom = coords top = bottom right = left elif len(coords) == 4: left, bottom, right, top = coords else: raise ValueError("Give 2 coordinates for point, or 4 for area") bbox = ( int(left), int(bottom), int(right), int(top), ) anchor = TargetObject(bbox=bbox) self._anchors.append(anchor) else: if criteria == "regex": pure_locator = re.compile(pure_locator) anchors = self._find_matching_textboxes(pure_locator, pagenum=pagenum) self._anchors.extend(anchors) if self._anchors: self.anchor_element = self._anchors[0] return True return False def _get_textboxes_on_page(self, pagenum: int) -> List[TextBox]: page = self.active_pdf_document.get_page(pagenum) return list(page.textboxes.values()) def _find_matching_textboxes( self, locator: Union[str, Pattern], *, pagenum: int ) -> List[TextBox]: self.logger.info("Searching for matching text boxes with: %r", locator) if isinstance(locator, str): lower_locator = locator.lower() matches_anchor = ( lambda _anchor: _anchor.text.lower() == lower_locator ) # noqa: E731 else: matches_anchor = lambda _anchor: locator.match(_anchor.text) # noqa: E731 anchors = [] for anchor in self._get_textboxes_on_page(pagenum): if matches_anchor(anchor): anchors.append(anchor) if anchors: self.logger.info("Found %d matches with locator %r", len(anchors), locator) for anchor in anchors: self._log_element(anchor) else: self.logger.warning("Did not find any 
matches with locator %r", locator) return anchors def _check_text_match(self, candidate: TextBox, regexp: Optional[Pattern]) -> bool: if regexp and regexp.match(candidate.text): self._log_element(candidate, prefix="Exact match:") return True if regexp is None: self._log_element(candidate, prefix="Potential match:") return True return False def _is_match_on_horizontal( self, candidate: TextBox, *, direction: str, regexp: Optional[Pattern], strict: bool, anchor: TextBox, ) -> bool: (left, bottom, right, top) = anchor.bbox direction_left = direction == "left" direction_right = direction == "right" if not any( [ direction_left and candidate.right <= left, direction_right and candidate.left >= right, ] ): return False # not in the seeked direction non_strict_match = not strict and ( bottom <= candidate.bottom <= top or bottom <= candidate.top <= top or candidate.bottom <= bottom <= candidate.top or candidate.bottom <= top <= candidate.top ) strict_match = strict and (candidate.bottom == bottom or candidate.top == top) if not any([non_strict_match, strict_match]): return False # candidate not in boundaries return self._check_text_match(candidate, regexp) def _is_match_on_vertical( self, candidate: TextBox, *, direction: str, regexp: Optional[Pattern], strict: bool, anchor: TextBox, ) -> bool: (left, bottom, right, top) = anchor.bbox direction_down = direction in ["bottom", "down"] direction_up = direction in ["top", "up"] if not any( [ direction_down and candidate.top <= bottom, direction_up and candidate.bottom >= top, ] ): return False # not in the seeked direction non_strict_match = not strict and ( left <= candidate.left <=
from __future__ import print_function import os import signal import socket import struct import subprocess import sys import unittest import random import time import tempfile import plasma USE_VALGRIND = False def random_object_id(): return "".join([chr(random.randint(0, 255)) for _ in range(plasma.PLASMA_ID_SIZE)]) def generate_metadata(length): metadata = length * ["\x00"] if length > 0: metadata[0] = chr(random.randint(0, 255)) metadata[-1] = chr(random.randint(0, 255)) for _ in range(100): metadata[random.randint(0, length - 1)] = chr(random.randint(0, 255)) return buffer("".join(metadata)) def write_to_data_buffer(buff, length): if length > 0: buff[0] = chr(random.randint(0, 255)) buff[-1] = chr(random.randint(0, 255)) for _ in range(100): buff[random.randint(0, length - 1)] = chr(random.randint(0, 255)) def create_object(client, data_size, metadata_size, seal=True): object_id = random_object_id() metadata = generate_metadata(metadata_size) memory_buffer = client.create(object_id, data_size, metadata) write_to_data_buffer(memory_buffer, data_size) if seal: client.seal(object_id) return object_id, memory_buffer, metadata def assert_get_object_equal(unit_test, client1, client2, object_id, memory_buffer=None, metadata=None): if memory_buffer is not None: unit_test.assertEqual(memory_buffer[:], client2.get(object_id)[:]) if metadata is not None: unit_test.assertEqual(metadata[:], client2.get_metadata(object_id)[:]) unit_test.assertEqual(client1.get(object_id)[:], client2.get(object_id)[:]) unit_test.assertEqual(client1.get_metadata(object_id)[:], client2.get_metadata(object_id)[:]) class TestPlasmaClient(unittest.TestCase): def setUp(self): # Start Plasma. 
plasma_store_executable = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../build/plasma_store") store_name = "/tmp/store{}".format(random.randint(0, 10000)) command = [plasma_store_executable, "-s", store_name] if USE_VALGRIND: self.p = subprocess.Popen(["valgrind", "--track-origins=yes", "--leak-check=full"] + command) time.sleep(2.0) else: self.p = subprocess.Popen(command) # Connect to Plasma. self.plasma_client = plasma.PlasmaClient(store_name) def tearDown(self): # Kill the plasma store process. if USE_VALGRIND: self.p.send_signal(signal.SIGTERM) self.p.wait() if self.p.returncode != 0: os._exit(-1) else: self.p.kill() def test_create(self): # Create an object id string. object_id = random_object_id() # Create a new buffer and write to it. length = 50 memory_buffer = self.plasma_client.create(object_id, length) for i in range(length): memory_buffer[i] = chr(i % 256) # Seal the object. self.plasma_client.seal(object_id) # Get the object. memory_buffer = self.plasma_client.get(object_id) for i in range(length): self.assertEqual(memory_buffer[i], chr(i % 256)) def test_create_with_metadata(self): for length in range(1000): # Create an object id string. object_id = random_object_id() # Create a random metadata string. metadata = generate_metadata(length) # Create a new buffer and write to it. memory_buffer = self.plasma_client.create(object_id, length, metadata) for i in range(length): memory_buffer[i] = chr(i % 256) # Seal the object. self.plasma_client.seal(object_id) # Get the object. memory_buffer = self.plasma_client.get(object_id) for i in range(length): self.assertEqual(memory_buffer[i], chr(i % 256)) # Get the metadata. 
metadata_buffer = self.plasma_client.get_metadata(object_id) self.assertEqual(len(metadata), len(metadata_buffer)) for i in range(len(metadata)): self.assertEqual(metadata[i], metadata_buffer[i]) def test_contains(self): fake_object_ids = [random_object_id() for _ in range(100)] real_object_ids = [random_object_id() for _ in range(100)] for object_id in real_object_ids: self.assertFalse(self.plasma_client.contains(object_id)) memory_buffer = self.plasma_client.create(object_id, 100) self.plasma_client.seal(object_id) self.assertTrue(self.plasma_client.contains(object_id)) for object_id in fake_object_ids: self.assertFalse(self.plasma_client.contains(object_id)) for object_id in real_object_ids: self.assertTrue(self.plasma_client.contains(object_id)) # def test_individual_delete(self): # length = 100 # # Create an object id string. # object_id = random_object_id() # # Create a random metadata string. # metadata = generate_metadata(100) # # Create a new buffer and write to it. # memory_buffer = self.plasma_client.create(object_id, length, metadata) # for i in range(length): # memory_buffer[i] = chr(i % 256) # # Seal the object. # self.plasma_client.seal(object_id) # # Check that the object is present. # self.assertTrue(self.plasma_client.contains(object_id)) # # Delete the object. # self.plasma_client.delete(object_id) # # Make sure the object is no longer present. # self.assertFalse(self.plasma_client.contains(object_id)) # # def test_delete(self): # # Create some objects. # object_ids = [random_object_id() for _ in range(100)] # for object_id in object_ids: # length = 100 # # Create a random metadata string. # metadata = generate_metadata(100) # # Create a new buffer and write to it. # memory_buffer = self.plasma_client.create(object_id, length, metadata) # for i in range(length): # memory_buffer[i] = chr(i % 256) # # Seal the object. # self.plasma_client.seal(object_id) # # Check that the object is present. 
# self.assertTrue(self.plasma_client.contains(object_id)) # # # Delete the objects and make sure they are no longer present. # for object_id in object_ids: # # Delete the object. # self.plasma_client.delete(object_id) # # Make sure the object is no longer present. # self.assertFalse(self.plasma_client.contains(object_id)) def test_illegal_functionality(self): # Create an object id string. object_id = random_object_id() # Create a new buffer and write to it. length = 1000 memory_buffer = self.plasma_client.create(object_id, length) # Make sure we cannot access memory out of bounds. self.assertRaises(Exception, lambda : memory_buffer[length]) # Seal the object. self.plasma_client.seal(object_id) # This test is commented out because it currently fails. # # Make sure the object is ready only now. # def illegal_assignment(): # memory_buffer[0] = chr(0) # self.assertRaises(Exception, illegal_assignment) # Get the object. memory_buffer = self.plasma_client.get(object_id) # Make sure the object is read only. def illegal_assignment(): memory_buffer[0] = chr(0) self.assertRaises(Exception, illegal_assignment) def test_subscribe(self): # Subscribe to notifications from the Plasma Store. sock = self.plasma_client.subscribe() for i in [1, 10, 100, 1000, 10000, 100000]: object_ids = [random_object_id() for _ in range(i)] for object_id in object_ids: # Create an object and seal it to trigger a notification. self.plasma_client.create(object_id, 1000) self.plasma_client.seal(object_id) # Check that we received notifications for all of the objects. for object_id in object_ids: message_data = self.plasma_client.get_next_notification() self.assertEqual(object_id, message_data) class TestPlasmaManager(unittest.TestCase): def setUp(self): # Start two PlasmaStores. 
        # --- Continuation of setUp(); the "def setUp" line is above this chunk. ---
        # Locate the plasma_store executable relative to this test file.
        plasma_store_executable = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                               "../build/plasma_store")
        # Random store socket names so concurrent test runs do not collide.
        store_name1 = "/tmp/store{}".format(random.randint(0, 10000))
        store_name2 = "/tmp/store{}".format(random.randint(0, 10000))
        plasma_store_command1 = [plasma_store_executable, "-s", store_name1]
        plasma_store_command2 = [plasma_store_executable, "-s", store_name2]

        if USE_VALGRIND:
            # Run the stores under valgrind; --error-exitcode=1 makes memory
            # errors show up as a nonzero returncode, checked in tearDown().
            self.p2 = subprocess.Popen(["valgrind", "--track-origins=yes",
                                        "--leak-check=full",
                                        "--error-exitcode=1"] + plasma_store_command1)
            self.p3 = subprocess.Popen(["valgrind", "--track-origins=yes",
                                        "--leak-check=full",
                                        "--error-exitcode=1"] + plasma_store_command2)
        else:
            self.p2 = subprocess.Popen(plasma_store_command1)
            self.p3 = subprocess.Popen(plasma_store_command2)

        # Start a Redis server.
        redis_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                  "../common/thirdparty/redis-3.2.3/src/redis-server")
        self.redis_process = None
        manager_redis_args = []
        if os.path.exists(redis_path):
            redis_port = 6379
            with open(os.devnull, 'w') as FNULL:
                self.redis_process = subprocess.Popen([redis_path, "--port",
                                                       str(redis_port)],
                                                      stdout=FNULL)
            # Give the server a moment to start accepting connections.
            time.sleep(0.1)
            manager_redis_args = ["-d", "{addr}:{port}".format(addr="127.0.0.1",
                                                               port=redis_port)]

        # Start two PlasmaManagers.
        self.port1 = random.randint(10000, 50000)
        self.port2 = random.randint(10000, 50000)
        plasma_manager_executable = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                                 "../build/plasma_manager")
        plasma_manager_command1 = [plasma_manager_executable,
                                   "-s", store_name1,
                                   "-m", "127.0.0.1",
                                   "-p", str(self.port1)] + manager_redis_args
        plasma_manager_command2 = [plasma_manager_executable,
                                   "-s", store_name2,
                                   "-m", "127.0.0.1",
                                   "-p", str(self.port2)] + manager_redis_args
        if USE_VALGRIND:
            self.p4 = subprocess.Popen(["valgrind", "--track-origins=yes",
                                        "--leak-check=full",
                                        "--error-exitcode=1"] + plasma_manager_command1)
            self.p5 = subprocess.Popen(["valgrind", "--track-origins=yes",
                                        "--leak-check=full",
                                        "--error-exitcode=1"] + plasma_manager_command2)
        else:
            self.p4 = subprocess.Popen(plasma_manager_command1)
            self.p5 = subprocess.Popen(plasma_manager_command2)

        # Connect two PlasmaClients, one per store/manager pair.
        self.client1 = plasma.PlasmaClient(store_name1, "127.0.0.1", self.port1)
        self.client2 = plasma.PlasmaClient(store_name2, "127.0.0.1", self.port2)

    def tearDown(self):
        """Shut down both stores, both managers and (if started) Redis."""
        # Kill the PlasmaStore and PlasmaManager processes.
        if USE_VALGRIND:
            # Terminate gracefully and wait so valgrind can finish its report;
            # a nonzero returncode means valgrind detected an error.
            self.p4.send_signal(signal.SIGTERM)
            self.p4.wait()
            self.p5.send_signal(signal.SIGTERM)
            self.p5.wait()
            self.p2.send_signal(signal.SIGTERM)
            self.p2.wait()
            self.p3.send_signal(signal.SIGTERM)
            self.p3.wait()
            if self.p2.returncode != 0 or self.p3.returncode != 0 or self.p4.returncode != 0 or self.p5.returncode != 0:
                print("aborting due to valgrind error")
                # Hard-exit so the valgrind failure cannot be masked by later tests.
                os._exit(-1)
        else:
            self.p2.kill()
            self.p3.kill()
            self.p4.kill()
            self.p5.kill()
        if self.redis_process:
            self.redis_process.kill()

    def test_fetch(self):
        """Fetch a single object back and forth between the two stores."""
        if self.redis_process is None:
            print("Cannot test fetch without a running redis instance.")
            self.assertTrue(False)
        for _ in range(100):
            # Create an object.
            object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
            # Fetch the object from the other plasma store.
            # TODO(swang): This line is a hack! It makes sure that the entry will be
            # in the object table once we call the fetch operation. Remove once
            # retries are implemented by Ray common.
            time.sleep(0.1)
            successes = self.client2.fetch([object_id1])
            self.assertEqual(successes, [True])
            # Compare the two buffers.
            assert_get_object_equal(self, self.client1, self.client2, object_id1,
                                    memory_buffer=memory_buffer1, metadata=metadata1)
            # Fetch in the other direction. These should return quickly because
            # client1 already has the object.
            successes = self.client1.fetch([object_id1])
            self.assertEqual(successes, [True])
            assert_get_object_equal(self, self.client2, self.client1, object_id1,
                                    memory_buffer=memory_buffer1, metadata=metadata1)

    def test_fetch_multiple(self):
        """Fetch a batch of objects including one that does not exist."""
        if self.redis_process is None:
            print("Cannot test fetch without a running redis instance.")
            self.assertTrue(False)
        for _ in range(20):
            # Create two objects and a third fake one that doesn't exist.
            object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
            missing_object_id = random_object_id()
            object_id2, memory_buffer2, metadata2 = create_object(self.client1, 2000, 2000)
            object_ids = [object_id1, missing_object_id, object_id2]
            # Fetch the objects from the other plasma store. The second object ID
            # should timeout since it does not exist.
            # TODO(swang): This line is a hack! It makes sure that the entry will be
            # in the object table once we call the fetch operation. Remove once
            # retries are implemented by Ray common.
            time.sleep(0.1)
            successes = self.client2.fetch(object_ids)
            self.assertEqual(successes, [True, False, True])
            # Compare the buffers of the objects that do exist.
            assert_get_object_equal(self, self.client1, self.client2, object_id1,
                                    memory_buffer=memory_buffer1, metadata=metadata1)
            assert_get_object_equal(self, self.client1, self.client2, object_id2,
                                    memory_buffer=memory_buffer2, metadata=metadata2)
            # Fetch in the other direction. The fake object still does not exist.
            successes = self.client1.fetch(object_ids)
            self.assertEqual(successes, [True, False, True])
            assert_get_object_equal(self, self.client2, self.client1, object_id1,
                                    memory_buffer=memory_buffer1, metadata=metadata1)
            assert_get_object_equal(self, self.client2, self.client1, object_id2,
                                    memory_buffer=memory_buffer2, metadata=metadata2)

    def test_transfer(self):
        """Push an object from store 1 to store 2 via the manager."""
        for _ in range(100):
            # Create an object.
            object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
            # Transfer the buffer to the other PlasmaStore.
            self.client1.transfer("127.0.0.1", self.port2, object_id1)
            # Compare the two buffers.
            # NOTE(review): chunk ends here; the body of test_transfer continues
            # beyond this chunk.
        # NOTE(review): chunk starts mid-statement; the enclosing
        # "if adv_temperature != ..." condition is above this chunk.
        None:
            # Fixed (non-trainable) adversarial temperature for self-adversarial
            # negative sampling.
            self.adv_temperature = nn.Parameter(torch.Tensor([adv_temperature]))
            self.adv_temperature.requires_grad = False
            self.adv_flag = True
        else:
            self.adv_flag = False

    def get_weights(self, n_score):
        """Detached softmax weights over negative scores (self-adversarial sampling)."""
        return F.softmax(n_score * self.adv_temperature, dim = -1).detach()

    def forward(self, p_score, n_score):
        """Sigmoid loss over positive scores p_score and negative scores n_score."""
        if self.adv_flag:
            # Weight each negative sample by its (detached) softmax weight.
            return -(self.criterion(p_score).mean() + (self.get_weights(n_score) * self.criterion(-n_score)).sum(dim = -1).mean()) / 2
        else:
            return -(self.criterion(p_score).mean() + self.criterion(-n_score).mean()) / 2

    def predict(self, p_score, n_score):
        """Return the loss value as a NumPy scalar/array (moved to CPU)."""
        score = self.forward(p_score, n_score)
        return score.cpu().data.numpy()


class DistMultLayer(KGCompletionLayerBase):
    r"""Specific class for knowledge graph completion task.

    DistMult from paper `Embedding entities and relations for learning and
    inference in knowledge bases <https://arxiv.org/pdf/1412.6575.pdf>`__.

    .. math::
        f(s, r, o) & = e_s^T R_r e_o

    Parameters
    ----------
    input_dropout: float
        Dropout for node_emb and rel_emb. Default: 0.0

    rel_emb_from_gnn: bool
        If `rel_emb` is computed from GNN, rel_emb_from_gnn is set to `True`.
        Else, rel_emb is initialized as nn.Embedding randomly. Default: `True`.

    num_relations: int
        Number of relations. `num_relations` is needed if rel_emb_from_gnn==False.
        Default: `None`.

    embedding_dim: int
        Dimension of the rel_emb. `embedding_dim` is needed if
        rel_emb_from_gnn==False. Default: `0`.

    loss_name: str
        The loss type selected for the KG completion task.
    """

    def __init__(self,
                 input_dropout=0.0,
                 rel_emb_from_gnn=True,
                 num_relations=None,
                 embedding_dim=None,
                 loss_name='BCELoss'):
        super(DistMultLayer, self).__init__()
        self.rel_emb_from_gnn = rel_emb_from_gnn
        self.inp_drop = nn.Dropout(input_dropout)
        if self.rel_emb_from_gnn == False:
            # Relation embeddings are owned by this layer, not produced by a GNN.
            assert num_relations != None
            assert embedding_dim != None
            self.rel_emb = nn.Embedding(num_relations, embedding_dim)
            self.reset_parameters()
        self.loss_name = loss_name
        # NOTE(review): reset_parameters() was already called above when the
        # embedding is created; this second call re-initializes it. Presumably
        # harmless but redundant — confirm intent.
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init only applies when this layer owns the relation embeddings.
        if self.rel_emb_from_gnn == False:
            nn.init.xavier_normal_(self.rel_emb.weight.data)

    def forward(self,
                node_emb,
                rel_emb=None,
                list_e_r_pair_idx=None,
                list_e_e_pair_idx=None,
                multi_label=None):
        r"""
        Parameters
        ----------
        node_emb: tensor [N,H]
            N: number of nodes in the whole KG graph
            H: length of the node embeddings (entity embeddings)

        rel_emb: tensor [N_r,H]
            N_r: number of relations in the whole KG graph
            H: length of the relation embeddings

        list_e_r_pair_idx: list of tuple
            a list of index of head entities and relations that needs
            predicting the tail entities between them. Default: `None`

        list_e_e_pair_idx: list of tuple
            a list of index of head entities and tail entities that needs
            predicting the relations between them. Default: `None`.
            Only one of `list_e_r_pair_idx` and `list_e_e_pair_idx` can be `None`.

        multi_label: tensor [L, N]
            multi_label is a binary matrix. Each element can be equal to 1 for
            true label and 0 for false label (or 1 for true label, -1 for false
            label). multi_label[i] represents a multi-label of a given
            head-rel pair or head-tail pair. L is the length of
            list_e_r_pair_idx, list_e_e_pair_idx or batch size.
            N: number of nodes in the whole KG graph.

        Returns
        -------
            logit tensor: [N, num_class] The score logits for all nodes predicted.
        """
        if self.rel_emb_from_gnn == False:
            # Caller must not also pass rel_emb when this layer owns it.
            assert rel_emb == None
            rel_emb = self.rel_emb.weight

        if list_e_r_pair_idx == None and list_e_e_pair_idx == None:
            raise RuntimeError("Only one of `list_e_r_pair_idx` and `list_e_e_pair_idx` can be `None`.")

        assert node_emb.size()[1]==rel_emb.size()[1]

        if list_e_r_pair_idx != None:
            # Score (head, rel) pairs against every node as candidate tail.
            ent_idxs = torch.LongTensor([x[0] for x in list_e_r_pair_idx])
            rel_idxs = torch.LongTensor([x[1] for x in list_e_r_pair_idx])

            selected_ent_embs = node_emb[ent_idxs].squeeze()  # [L, H]. L is the length of list_e_r_pair_idx
            selected_rel_embs = rel_emb[rel_idxs].squeeze()  # [L, H]. L is the length of list_e_r_pair_idx

            # dropout
            selected_ent_embs = self.inp_drop(selected_ent_embs)
            selected_rel_embs = self.inp_drop(selected_rel_embs)

            logits = torch.mm(selected_ent_embs * selected_rel_embs,
                              node_emb.transpose(1, 0))
        elif list_e_e_pair_idx != None:
            # Score (head, tail) pairs against every relation as candidate.
            ent_head_idxs = torch.LongTensor([x[0] for x in list_e_e_pair_idx])
            ent_tail_idxs = torch.LongTensor([x[1] for x in list_e_e_pair_idx])

            selected_ent_head_embs = node_emb[ent_head_idxs].squeeze()  # [L, H]. L is the length of list_e_e_pair_idx
            # NOTE(review): tail *entity* embeddings are looked up in rel_emb,
            # not node_emb — looks like a bug; confirm against callers/paper.
            selected_ent_tail_embs = rel_emb[ent_tail_idxs].squeeze()  # [L, H]. L is the length of list_e_e_pair_idx

            # dropout
            selected_ent_head_embs = self.inp_drop(selected_ent_head_embs)
            selected_ent_tail_embs = self.inp_drop(selected_ent_tail_embs)

            logits = torch.mm(selected_ent_head_embs*selected_ent_tail_embs,
                              rel_emb.transpose(1, 0))

        if self.loss_name in ['SoftMarginLoss']:
            # target labels are numbers selecting from -1 and 1.
            pred = torch.tanh(logits)
        else:
            # target labels are numbers selecting from 0 and 1.
            pred = torch.sigmoid(logits)

        # if multi_label!=None:
        if type(multi_label) != type(None):
            # Split predictions into those at positive (==1) and negative (==0)
            # label positions so the caller can feed pairwise losses.
            idxs_pos = torch.nonzero(multi_label == 1.)
            pred_pos = pred[idxs_pos[:, 0], idxs_pos[:, 1]]

            idxs_neg = torch.nonzero(multi_label == 0.)
            pred_neg = pred[idxs_neg[:, 0], idxs_neg[:, 1]]
            return pred, pred_pos, pred_neg
        else:
            return pred


class DistMultGNN(torch.nn.Module):
    """Two-layer GCN encoder followed by a DistMult scoring layer."""

    def __init__(self, num_entities, num_relations, loss_name='BCELoss'):
        super(DistMultGNN, self).__init__()
        self.emb_e = torch.nn.Embedding(num_entities, Config.init_emb_size)
        self.gc1 = GraphConvolution(Config.init_emb_size, Config.gc1_emb_size, num_relations)
        self.gc2 = GraphConvolution(Config.gc1_emb_size, Config.embedding_dim, num_relations)
        # self.emb_rel = torch.nn.Embedding(num_relations, Config.embedding_dim)
        self.loss_name = loss_name
        if loss_name == 'BCELoss':  # Multi-Class Loss (Binary Cross Entropy Loss)
            self.loss = torch.nn.BCELoss()
        elif loss_name == "SoftplusLoss":
            self.loss = SoftplusLoss()
        elif loss_name == "SigmoidLoss":
            self.loss = SigmoidLoss()
        elif loss_name == "SoftMarginLoss":
            self.loss = nn.SoftMarginLoss()
        elif loss_name == "MSELoss":
            self.loss = nn.MSELoss()
        else:
            raise NotImplementedError()
        self.register_parameter('b', Parameter(torch.zeros(num_entities)))
        self.fc = torch.nn.Linear(Config.embedding_dim*Config.channels,Config.embedding_dim)
        self.bn3 = torch.nn.BatchNorm1d(Config.gc1_emb_size)
        self.bn4 = torch.nn.BatchNorm1d(Config.embedding_dim)
        # Relation embeddings are owned by the DistMult layer (rel_emb_from_gnn=False).
        self.dismult_layer = DistMultLayer(rel_emb_from_gnn=False,
                                           num_relations=num_relations,
                                           embedding_dim=Config.embedding_dim,
                                           loss_name=self.loss_name)
        print(num_entities, num_relations)
        self.init()

    def init(self):
        # Xavier-initialize entity embeddings and both GCN weight matrices.
        xavier_normal_(self.emb_e.weight.data)
        # xavier_normal_(self.emb_rel.weight.data)
        xavier_normal_(self.gc1.weight.data)
        xavier_normal_(self.gc2.weight.data)

    def forward(self, e1, rel, X, A, e2_multi=None):
        """Encode all entities with the GCN, then score (e1, rel) pairs with DistMult."""
        emb_initial = self.emb_e(X)
        x = self.gc1(emb_initial, A)
        x = self.bn3(x)
        x = torch.tanh(x)
        x = torch.dropout(x, Config.dropout_rate, train=self.training)
        x = self.bn4(self.gc2(x, A))
        e1_embedded_all = torch.tanh(x)
        e1_embedded_all = torch.dropout(e1_embedded_all, Config.dropout_rate, train=self.training)
        # e1_embedded = e1_embedded_all[e1]
        # rel_embedded = self.emb_rel(rel)
        list_e_r_pair_idx = list(zip(e1.squeeze().tolist(), rel.squeeze().tolist()))
        # TODO: emb_rel from gnn
        pred = self.dismult_layer(e1_embedded_all,
                                  list_e_r_pair_idx = list_e_r_pair_idx,
                                  multi_label=e2_multi)
        # pred = self.dismult_layer(e1_embedded_all, self.emb_rel.weight, list_e_r_pair_idx, multi_label=e2_multi)
        return pred


class TransELayer(KGCompletionLayerBase):
    r"""Specific class for knowledge graph completion task.

    TransE from paper `Translating Embeddings for Modeling
    Multi-relational Data <https://papers.nips.cc/paper/5071
    -translating-embeddings-for-modeling-multi-relational-data.pdf>`__.

    .. math::
        f(s, r, o) & = ||e_s + w_r - e_o||_p

    Parameters
    ----------
    p_norm: int
        Default: 1

    rel_emb_from_gnn: bool
        If `rel_emb` is computed from GNN, rel_emb_from_gnn is set to `True`.
        Else, rel_emb is initialized as nn.Embedding randomly. Default: `True`.

    num_relations: int
        Number of relations. `num_relations` is needed if rel_emb_from_gnn==False.
        Default: `None`.

    embedding_dim: int
        Dimension of the rel_emb. `embedding_dim` is needed if
        rel_emb_from_gnn==False. Default: `0`.

    loss_name: str
        The loss type selected for the KG completion task.
    """

    def __init__(self,
                 p_norm=1,
                 rel_emb_from_gnn=True,
                 num_relations=None,
                 embedding_dim=None,
                 loss_name='BCELoss'):
        super(TransELayer, self).__init__()
        self.p_norm = p_norm
        self.rel_emb_from_gnn = rel_emb_from_gnn
        if self.rel_emb_from_gnn == False:
            # Relation embeddings are owned by this layer, not produced by a GNN.
            assert num_relations != None
            assert embedding_dim != None
            self.rel_emb = nn.Embedding(num_relations, embedding_dim)
            self.reset_parameters()
        self.loss_name = loss_name

    def reset_parameters(self):
        # Xavier init only applies when this layer owns the relation embeddings.
        if self.rel_emb_from_gnn == False:
            nn.init.xavier_normal_(self.rel_emb.weight.data)

    def forward(self,
                node_emb,
                rel_emb=None,
                list_e_r_pair_idx=None,
                list_e_e_pair_idx=None,
                multi_label=None):
        r"""
        Parameters
        ----------
        node_emb: tensor [N,H]
            N: number of nodes in the whole KG graph
            H: length of the node embeddings (entity embeddings)

        rel_emb: tensor [N_r,H]
            N_r: number of relations in the whole KG graph
            H: length of the relation embeddings

        list_e_r_pair_idx: list of tuple
            a list of index of head entities and relations that needs
            predicting the tail entities between them. Default: `None`

        list_e_e_pair_idx: list of tuple
            a list of index of head entities and tail entities that needs
            predicting the relations between them. Default: `None`.
            Only one of `list_e_r_pair_idx` and `list_e_e_pair_idx` can be `None`.

        multi_label: tensor [L, N]
            multi_label is a binary matrix. Each element can be equal to 1 for
            true label and 0 for false label (or 1 for true label, -1 for false
            label). multi_label[i] represents a multi-label of a given
            head-rel pair or head-tail pair. L is the length of
            list_e_r_pair_idx, list_e_e_pair_idx or batch size.
            N: number of nodes in the whole KG graph.

        Returns
        -------
            logit tensor: [N, num_class] The score logits for all nodes predicted.
        """
        if self.rel_emb_from_gnn == False:
            # Caller must not also pass rel_emb when this layer owns it.
            assert rel_emb == None
            rel_emb = self.rel_emb.weight

        if list_e_r_pair_idx == None and list_e_e_pair_idx == None:
            raise RuntimeError("Only one of `list_e_r_pair_idx` and `list_e_e_pair_idx` can be `None`.")

        assert node_emb.size()[1] == rel_emb.size()[1]

        if list_e_r_pair_idx != None:
            # Compute head + rel against every node as candidate tail.
            ent_idxs = torch.LongTensor([x[0] for x in list_e_r_pair_idx])
            rel_idxs = torch.LongTensor([x[1] for x in list_e_r_pair_idx])

            selected_ent_embs = node_emb[ent_idxs].squeeze()  # [L, H]. L is the length of list_e_r_pair_idx
            selected_rel_embs = rel_emb[rel_idxs].squeeze()  # [L, H]. L is the length of list_e_r_pair_idx

            # L2-normalize embeddings before the translation, per TransE practice.
            selected_ent_embs = F.normalize(selected_ent_embs, 2, -1)
            selected_rel_embs = F.normalize(selected_rel_embs, 2, -1)
            node_emb = F.normalize(node_emb, 2, -1)

            head_add_rel = selected_ent_embs + selected_rel_embs  # [L, H]
            head_add_rel = head_add_rel.view(head_add_rel.size()[0], 1, head_add_rel.size()[1])  # [L, 1, H]
            head_add_rel = head_add_rel.repeat(1, node_emb.size()[0], 1)

            node_emb = node_emb.view(1, node_emb.size()[0], node_emb.size()[1])  # [1, N, H]
            node_emb = node_emb.repeat(head_add_rel.size()[0], 1, 1)

            result = head_add_rel - node_emb  # head+rel-tail [L, N, H]
        elif list_e_e_pair_idx != None:
            ent_head_idxs = torch.LongTensor([x[0] for x in list_e_e_pair_idx])
            ent_tail_idxs = torch.LongTensor([x[1] for x in list_e_e_pair_idx])

            selected_ent_head_embs = node_emb[ent_head_idxs].squeeze()  # [L, H]. L is the length of list_e_e_pair_idx
            # NOTE(review): tail *entity* embeddings are looked up in rel_emb,
            # not node_emb — looks like a bug; confirm against callers/paper.
            selected_ent_tail_embs = rel_emb[ent_tail_idxs].squeeze()  # [L, H]. L is the length of list_e_e_pair_idx

            selected_ent_head_embs = F.normalize(selected_ent_head_embs, 2, -1)
            selected_ent_tail_embs = F.normalize(selected_ent_tail_embs, 2, -1)
            rel_emb = F.normalize(rel_emb, 2, -1)

            # NOTE(review): chunk ends mid-statement; the right-hand side of
            # this assignment continues beyond this chunk.
            head_sub_tail =
            # NOTE(review): chunk starts mid-condition; the "if" keyword is
            # above this chunk (this is the tail of Event.wait()).
            not self.__flag:
                self.__cond.wait(timeout)
            return self.__flag
        finally:
            self.__cond.release()

# Helper to generate new thread names
_counter = _count().next   # Python 2 iterator protocol: bound .next method
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
    # Return the next auto-generated thread name, e.g. "Thread-1".
    return template % _counter()

# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {}    # maps thread id to Thread object
_limbo = {}     # started-but-not-yet-running threads, keyed by Thread object


# Main class for threads

class Thread(_Verbose):
    """A class that represents a thread of control.

    This class can be safely subclassed in a limited fashion.

    """
    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    __exc_info = _sys.exc_info
    # Keep sys.exc_clear too to clear the exception just before
    # allowing .join() to return.
    __exc_clear = _sys.exc_clear

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, verbose=None):
        """This constructor should always be called with keyword arguments. Arguments are:

        *group* should be None; reserved for future extension when a
        ThreadGroup class is implemented.

        *target* is the callable object to be invoked by the run()
        method. Defaults to None, meaning nothing is called.

        *name* is the thread name. By default, a unique name is constructed of
        the form "Thread-N" where N is a small decimal number.

        *args* is the argument tuple for the target invocation. Defaults to ().

        *kwargs* is a dictionary of keyword arguments for the target
        invocation. Defaults to {}.

        If a subclass overrides the constructor, it must make sure to invoke
        the base class constructor (Thread.__init__()) before doing anything
        else to the thread.

        """
        assert group is None, "group argument must be None for now"
        _Verbose.__init__(self, verbose)
        if kwargs is None:
            kwargs = {}
        self.__target = target
        self.__name = str(name or _newname())
        self.__args = args
        self.__kwargs = kwargs
        self.__daemonic = self._set_daemon()
        self.__ident = None
        self.__started = Event()
        self.__stopped = False
        self.__block = Condition(Lock())
        self.__initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self.__stderr = _sys.stderr

    def _reset_internal_locks(self):
        # private!  Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
        if hasattr(self, '_Thread__block'):  # DummyThread deletes self.__block
            self.__block.__init__()
        self.__started._reset_internal_locks()

    @property
    def _block(self):
        # used by a unittest
        return self.__block

    def _set_daemon(self):
        # Overridden in _MainThread and _DummyThread
        return current_thread().daemon

    def __repr__(self):
        assert self.__initialized, "Thread.__init__() was not called"
        status = "initial"
        if self.__started.is_set():
            status = "started"
        if self.__stopped:
            status = "stopped"
        if self.__daemonic:
            status += " daemon"
        if self.__ident is not None:
            status += " %s" % self.__ident
        return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)

    def start(self):
        """Start the thread's activity.

        It must be called at most once per thread object. It arranges for the
        object's run() method to be invoked in a separate thread of control.

        This method will raise a RuntimeError if called more than once on the
        same thread object.

        """
        if not self.__initialized:
            raise RuntimeError("thread.__init__() not called")
        if self.__started.is_set():
            raise RuntimeError("threads can only be started once")
        if __debug__:
            self._note("%s.start(): starting thread", self)
        with _active_limbo_lock:
            _limbo[self] = self
        try:
            _start_new_thread(self.__bootstrap, ())
        except Exception:
            # Starting the OS thread failed: undo the limbo registration.
            with _active_limbo_lock:
                del _limbo[self]
            raise
        self.__started.wait()

    def run(self):
        """Method representing the thread's activity.

        You may override this method in a subclass. The standard run() method
        invokes the callable object passed to the object's constructor as the
        target argument, if any, with sequential and keyword arguments taken
        from the args and kwargs arguments, respectively.

        """
        try:
            if self.__target:
                self.__target(*self.__args, **self.__kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self.__target, self.__args, self.__kwargs

    def __bootstrap(self):
        # Wrapper around the real bootstrap code that ignores
        # exceptions during interpreter cleanup.  Those typically
        # happen when a daemon thread wakes up at an unfortunate
        # moment, finds the world around it destroyed, and raises some
        # random exception *** while trying to report the exception in
        # __bootstrap_inner() below ***.  Those random exceptions
        # don't help anybody, and they confuse users, so we suppress
        # them.  We suppress them only when it appears that the world
        # indeed has already been destroyed, so that exceptions in
        # __bootstrap_inner() during normal business hours are properly
        # reported.  Also, we only suppress them for daemonic threads;
        # if a non-daemonic encounters this, something else is wrong.
        try:
            self.__bootstrap_inner()
        except:
            if self.__daemonic and _sys is None:
                return
            raise

    def _set_ident(self):
        self.__ident = _get_ident()

    def __bootstrap_inner(self):
        try:
            self._set_ident()
            self.__started.set()
            # Move this thread from limbo to the active table.
            with _active_limbo_lock:
                _active[self.__ident] = self
                del _limbo[self]
            if __debug__:
                self._note("%s.__bootstrap(): thread started", self)

            if _trace_hook:
                self._note("%s.__bootstrap(): registering trace hook", self)
                _sys.settrace(_trace_hook)
            if _profile_hook:
                self._note("%s.__bootstrap(): registering profile hook", self)
                _sys.setprofile(_profile_hook)

            try:
                self.run()
            except SystemExit:
                if __debug__:
                    self._note("%s.__bootstrap(): raised SystemExit", self)
            except:
                if __debug__:
                    self._note("%s.__bootstrap(): unhandled exception", self)
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self.__stderr.  Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys and _sys.stderr is not None:
                    print>>_sys.stderr, ("Exception in thread %s:\n%s" %
                                         (self.name, _format_exc()))
                elif self.__stderr is not None:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self.__exc_info()
                    try:
                        print>>self.__stderr, (
                            "Exception in thread " + self.name +
                            " (most likely raised during interpreter shutdown):")
                        print>>self.__stderr, (
                            "Traceback (most recent call last):")
                        while exc_tb:
                            print>>self.__stderr, (
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                 exc_tb.tb_lineno,
                                 exc_tb.tb_frame.f_code.co_name))
                            exc_tb = exc_tb.tb_next
                        print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            else:
                if __debug__:
                    self._note("%s.__bootstrap(): normal return", self)
            finally:
                # Prevent a race in
                # test_threading.test_no_refcycle_through_target when
                # the exception keeps the target alive past when we
                # assert that it's dead.
                self.__exc_clear()
        finally:
            with _active_limbo_lock:
                self.__stop()
                try:
                    # We don't call self.__delete() because it also
                    # grabs _active_limbo_lock.
                    del _active[_get_ident()]
                except:
                    pass

    def __stop(self):
        # DummyThreads delete self.__block, but they have no waiters to
        # notify anyway (join() is forbidden on them).
        if not hasattr(self, '_Thread__block'):
            return
        self.__block.acquire()
        self.__stopped = True
        self.__block.notify_all()
        self.__block.release()

    def __delete(self):
        "Remove current thread from the dict of currently running threads."

        # Notes about running with dummy_thread:
        #
        # Must take care to not raise an exception if dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading).  dummy_thread.get_ident() always returns -1 since
        # there is only one thread if dummy_thread is being used.  Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'.  This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from dummy_thread.get_ident() and thus have the
        # same key in the dict.  So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring.  But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.

        try:
            with _active_limbo_lock:
                del _active[_get_ident()]
                # There must not be any python code between the previous line
                # and after the lock is released.  Otherwise a tracing function
                # could try to acquire the lock again in the same thread, (in
                # current_thread()), and would block.
        except KeyError:
            if 'dummy_threading' not in _sys.modules:
                raise

    def join(self, timeout=None):
        # NOTE(review): chunk ends here; the docstring and body of join()
        # continue beyond this chunk.
import struct
import unittest
import unittest.mock as mock
from io import StringIO

from numpy import float32

import settings
from interpreter import exceptions as ex
from interpreter import memory, syscalls
from interpreter.classes import Label
from interpreter.interpreter import Interpreter

'''
https://github.com/sbustars/STARS

Copyright 2020 <NAME>, <NAME>, and <NAME>

Developed by <NAME> (<EMAIL>), <NAME> (<EMAIL>), and <NAME> (<EMAIL>)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''


def out(s, end=''):
    # Small print helper with no trailing newline by default.
    print(s, end=end)


class TestSyscalls(unittest.TestCase):
    """Unit tests for the MIPS syscall implementations in interpreter.syscalls.

    Each test builds a fresh Interpreter/Memory pair, seeds the registers or
    data segment, invokes one syscall helper and checks stdout (captured via
    mock) or register/memory effects.
    """

    # syscall 1
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printInt(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 0}
        syscalls.printInt(inter)
        self.assertEqual(mock_stdout.getvalue(), str(0))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printNegInt(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': -1}
        syscalls.printInt(inter)
        self.assertEqual(mock_stdout.getvalue(), str(-1))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printLargeInt(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 0x7FFFFFFF}
        syscalls.printInt(inter)
        self.assertEqual(mock_stdout.getvalue(), str(0x7FFFFFFF))

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printLargeNegInt(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': -2147483648}
        syscalls.printInt(inter)
        self.assertEqual(mock_stdout.getvalue(), str(-2147483648))

    # syscall 2
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printFloat(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.f_reg = {'$f12': float32(420.42)}
        syscalls.printFloat(inter)
        self.assertEqual(mock_stdout.getvalue(), '420.42')

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printFloatBig(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.f_reg = {'$f12': float32(42.0E17)}
        syscalls.printFloat(inter)
        self.assertEqual(mock_stdout.getvalue(), '4.2e+18')

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printFloatInf(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.f_reg = {'$f12': float32('inf')}
        syscalls.printFloat(inter)
        self.assertEqual(mock_stdout.getvalue(), 'inf')

    # syscall 3
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printDouble(self, mock_stdout):
        # $f12/$f13 hold the two halves of the double-precision value.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.f_reg = {'$f12': float32(1.26443839488E11), '$f13': float32(3.9105663299560546875E0)}
        syscalls.printDouble(inter)
        self.assertEqual(mock_stdout.getvalue(), '420.42')

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printDoubleBig(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.f_reg = {'$f12': float32(-2.4833974245227757568E19), '$f13': float32(4.1028668212890625E2)}
        syscalls.printDouble(inter)
        self.assertEqual(mock_stdout.getvalue(), '4.2e+18')

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printDoubleInf(self, mock_stdout):
        # Build the IEEE-754 infinity bit pattern for the high word by hand.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inf_int = 0x7FF00000
        inf_bytes = struct.pack('>i', inf_int)
        inf_float = struct.unpack('>f', inf_bytes)[0]
        inter.f_reg = {'$f12': float32(0), '$f13': inf_float}
        syscalls.printDouble(inter)
        self.assertEqual(mock_stdout.getvalue(), 'inf')

    # syscall 4
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printString(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('words', inter.mem.dataPtr)
        syscalls.printString(inter)
        self.assertEqual(mock_stdout.getvalue(), 'words')

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printInvalidString(self, mock_stdout):
        # Byte 255 embedded in the string must raise InvalidCharacter.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAscii('words', inter.mem.dataPtr)
        inter.mem.dataPtr += 5
        inter.mem.addByte(255, inter.mem.dataPtr)
        inter.mem.dataPtr += 1
        inter.mem.addAsciiz('words', inter.mem.dataPtr)
        self.assertRaises(ex.InvalidCharacter, syscalls.printString, inter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printInvalidString2(self, mock_stdout):
        # Non-printable control byte 8 must also raise InvalidCharacter.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAscii('words', inter.mem.dataPtr)
        inter.mem.dataPtr += 5
        inter.mem.addByte(8, inter.mem.dataPtr)
        inter.mem.dataPtr += 1
        inter.mem.addAsciiz('words', inter.mem.dataPtr)
        self.assertRaises(ex.InvalidCharacter, syscalls.printString, inter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printEmptyString(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addByte(0, inter.mem.dataPtr)
        syscalls.printString(inter)
        self.assertEqual(mock_stdout.getvalue(), '')

    # syscall 5
    @mock.patch('builtins.input', side_effect=['0'])
    def test_readInt(self, input):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$v0': 8}
        syscalls.readInteger(inter)
        self.assertEqual(0, inter.reg['$v0'])

    @mock.patch('builtins.input', side_effect=['-1'])
    def test_readNegInt(self, input):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$v0': 8}
        syscalls.readInteger(inter)
        self.assertEqual(-1, inter.reg['$v0'])

    @mock.patch('builtins.input', side_effect=['A'])
    def test_readInvalidInt(self, input):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$v0': 8}
        self.assertRaises(ex.InvalidInput, syscalls.readInteger, inter)

    # syscall 6
    def test_atoi(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('02113', inter.mem.dataPtr)
        syscalls.atoi(inter)
        self.assertEqual(2113, inter.reg['$v0'])

    def test_atoi_zero(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('0', inter.mem.dataPtr)
        syscalls.atoi(inter)
        self.assertEqual(0, inter.reg['$v0'])

    def test_atoi_neg(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('-12345', inter.mem.dataPtr)
        syscalls.atoi(inter)
        self.assertEqual(-12345, inter.reg['$v0'])

    def test_atoi_bad1(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('--12345', inter.mem.dataPtr)
        self.assertRaises(ex.InvalidCharacter, syscalls.atoi, inter)

    def test_atoi_bad2(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('123e45', inter.mem.dataPtr)
        self.assertRaises(ex.InvalidCharacter, syscalls.atoi, inter)

    def test_atoi_bad_empty(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr}
        inter.mem.addAsciiz('', inter.mem.dataPtr)
        self.assertRaises(ex.InvalidCharacter, syscalls.atoi, inter)

    # syscall 8
    @mock.patch('builtins.input', side_effect=['uwu'])
    def test_readString(self, input):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': 3}
        syscalls.readString(inter)
        s = syscalls.getString(inter.mem.dataPtr, inter.mem, num_chars=3)
        self.assertEqual('uwu', s)

    @mock.patch('builtins.input', side_effect=['uwu uwu'])
    def test_underReadString(self, input):
        # Input longer than $a1: only the first 3 chars should be stored.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': 3}
        syscalls.readString(inter)
        s = syscalls.getString(inter.mem.dataPtr, inter.mem, num_chars=3)
        self.assertEqual('uwu', s)

    @mock.patch('builtins.input', side_effect=['uwu uwu'])
    def test_overReadString(self, input):
        # $a1 larger than the input: the whole input should be stored.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': 9}
        syscalls.readString(inter)
        s = syscalls.getString(inter.mem.dataPtr, inter.mem, num_chars=9)
        self.assertEqual('uwu uwu', s)

    @mock.patch('builtins.input', side_effect=[str(chr(0xFF))])
    def test_readWeirdString(self, input):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': 9}
        syscalls.readString(inter)
        s = inter.mem.getByte(inter.mem.dataPtr, signed=False)
        # NOTE(review): this assertion compares a constant with itself and
        # never inspects `s` — it cannot fail. Presumably it was meant to
        # check the stored byte; confirm and fix.
        self.assertEqual(str(chr(0xFF)), str(chr(0xFF)))

    # syscall 9
    def test_sbrk(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 5, '$v0': 0}
        out = inter.mem.heapPtr
        syscalls.sbrk(inter)
        # $v0 gets the old break; the heap pointer advances by $a0 rounded
        # up to a word boundary.
        self.assertEqual(out, inter.reg['$v0'])
        self.assertEqual(out + inter.reg['$a0'] + (4 - (inter.reg['$a0'] % 4)), inter.mem.heapPtr)

    def test_Negsbrk(self):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': -1, '$v0': 0}
        self.assertRaises(ex.InvalidArgument, syscalls.sbrk, inter)

    def test_Negsbrk2(self):
        # 0xFFFFFFFF is -1 as a signed 32-bit word: also invalid.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 0xFFFFFFFF, '$v0': 0}
        self.assertRaises(ex.InvalidArgument, syscalls.sbrk, inter)

    def test_0sbrk(self):
        # A zero-byte request must leave the heap pointer unchanged.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 0, '$v0': 0}
        out = inter.mem.heapPtr
        syscalls.sbrk(inter)
        self.assertEqual(out, inter.reg['$v0'])
        self.assertEqual(out, inter.mem.heapPtr)

    def test_Maxsbrk(self):
        # Request exactly the remaining space up to the initial stack pointer.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': settings.settings['initial_$sp'] - inter.mem.heapPtr, '$v0': 0}
        out = inter.mem.heapPtr
        syscalls.sbrk(inter)
        self.assertEqual(out, inter.reg['$v0'])
        # NOTE(review): `heap` is computed with word-rounding but never used;
        # the assertion below checks the unrounded value instead — confirm
        # which is intended.
        heap = out + inter.reg['$a0']
        if heap % 4 != 0:
            heap += 4 - (heap % 4)
        self.assertEqual(out + inter.reg['$a0'], inter.mem.heapPtr)

    # syscall 11
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printChar(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': ord('A')}
        syscalls.printChar(inter)
        self.assertEqual(mock_stdout.getvalue(), 'A')

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printInvalidChar(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 8}
        self.assertRaises(ex.InvalidCharacter, syscalls.printChar, inter)

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_printInvalidChar2(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.reg = {'$a0': 255}
        self.assertRaises(ex.InvalidCharacter, syscalls.printChar, inter)

    # syscall 30
    # NOTE(review): the expected-output triple-quoted literals below appear to
    # have lost their original newlines/column padding in this copy of the
    # file (everything is collapsed to single spaces). Verify them against the
    # actual output of syscalls.memDump before relying on these tests.
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_dumpMem(self, mock_stdout):
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.mem.addAsciiz('uwu hewwo worwd >.<', inter.mem.dataPtr)
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': inter.mem.dataPtr + 12}
        syscalls.memDump(inter)
        self.assertEqual('''addr hex ascii 0x10010000 20 75 77 75 u w u 0x10010004 77 77 65 68 w w e h 0x10010008 6f 77 20 6f o w o ''', mock_stdout.getvalue())

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_dumpMemBadChar(self, mock_stdout):
        # Byte 255 in the dumped range is rendered as '.' in the ascii column.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.mem.addAsciiz('uwu hew' + str(chr(255)) + 'o worwd >.<', inter.mem.dataPtr)
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': inter.mem.dataPtr + 12}
        syscalls.memDump(inter)
        self.assertEqual('''addr hex ascii 0x10010000 20 75 77 75 u w u 0x10010004 ff 77 65 68 . w e h 0x10010008 6f 77 20 6f o w o ''', mock_stdout.getvalue())

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_dumpMemBadChar2(self, mock_stdout):
        # Control byte 20 is likewise rendered as '.'.
        inter = Interpreter([Label('main')], [])
        inter.mem = memory.Memory()
        inter.mem.addAsciiz('uwu hew' + str(chr(20)) + 'o worwd >.<', inter.mem.dataPtr)
        inter.reg = {'$a0': inter.mem.dataPtr, '$a1': inter.mem.dataPtr + 12}
        syscalls.memDump(inter)
        # NOTE(review): chunk ends mid-string-literal; the remainder of this
        # expected value (and the closing quotes) continues beyond this chunk.
        self.assertEqual('''addr hex ascii 0x10010000 20 75 77 75 u w u 0x10010004 14 77 65 68 . 
w e h 0x10010008 6f 77 20 6f o w o ''', mock_stdout.getvalue()) @mock.patch('sys.stdout', new_callable=StringIO) def test_dumpMemNull(self, mock_stdout): inter = Interpreter([Label('main')], []) inter.mem = memory.Memory() inter.mem.addAsciiz('uwu hew' + str(chr(0)) + 'o worwd >.<', inter.mem.dataPtr) inter.reg = {'$a0': inter.mem.dataPtr, '$a1': inter.mem.dataPtr + 12} syscalls.memDump(inter) self.assertEqual('''addr hex ascii 0x10010000 20 75 77 75 u w u 0x10010004 00 77 65 68 \\0 w e h 0x10010008 6f 77 20 6f o w o ''', mock_stdout.getvalue()) @mock.patch('sys.stdout', new_callable=StringIO) def test_dumpMemTab(self, mock_stdout): inter = Interpreter([Label('main')], []) inter.mem = memory.Memory() inter.mem.addAsciiz('uwu hew' + str(chr(9)) + 'o worwd >.<', inter.mem.dataPtr) inter.reg = {'$a0': inter.mem.dataPtr, '$a1': inter.mem.dataPtr + 12} syscalls.memDump(inter) self.assertEqual('''addr hex ascii 0x10010000 20 75 77
import os
from helper_evaluate import compute_accuracy, compute_mae_and_mse
from helper_losses import (niu_loss, coral_loss, conditional_loss,
                           conditional_loss_ablation)
from helper_data import levels_from_labelbatch
import time
import torch
import torch.nn.functional as F
from collections import OrderedDict
import json
import subprocess
import sys
import numpy as np


def iteration_logging(info_dict, batch_idx, loss, train_dataset, frequency, epoch):
    """Record the current minibatch loss and periodically echo progress.

    Appends ``loss.item()`` to ``info_dict['training']['minibatch loss']``.
    Every ``frequency`` batches a one-line progress summary is printed and
    appended to the training logfile named in ``info_dict['settings']``.
    """
    settings = info_dict['settings']
    log_path = settings['training logfile']
    batch_size = settings['batch size']
    num_epochs = settings['num epochs']

    info_dict['training']['minibatch loss'].append(loss.item())

    if not batch_idx % frequency:
        msg = (f'Epoch: {epoch:03d}/{num_epochs:03d} | '
               f'Batch {batch_idx:04d}/'
               f'{len(train_dataset)//batch_size:04d} | '
               f'Loss: {loss:.4f}')
        print(msg)
        with open(log_path, 'a') as fh:
            fh.write(f'{msg}\n')


def epoch_logging(info_dict, model, loss, epoch, start_time, which_model,
                  device, train_loader, valid_loader, skip_train_eval=False):
    """Evaluate the model after one epoch and log train/valid metrics.

    Computes accuracy, MAE and RMSE on both loaders, appends them to the
    per-epoch history lists in ``info_dict['training']``, checkpoints the
    model to ``best_model.pt`` whenever the validation RMSE improves on the
    best seen so far, and writes a summary line plus elapsed time to the
    training logfile.

    NOTE(review): ``loss`` and ``skip_train_eval`` are accepted but never
    used here -- kept to preserve the call signature; train metrics are
    always computed. ``which_model`` is forwarded to the evaluation helpers.
    """
    out_path = info_dict['settings']['output path']
    log_path = info_dict['settings']['training logfile']
    tr = info_dict['training']

    model.eval()
    with torch.no_grad():  # save memory during inference
        train_acc = compute_accuracy(
            model, train_loader, device=device, which_model=which_model)
        valid_acc = compute_accuracy(
            model, valid_loader, device=device, which_model=which_model)
        train_mae, train_mse = compute_mae_and_mse(
            model, train_loader, device=device, which_model=which_model,
            with_embedding=False)
        valid_mae, valid_mse = compute_mae_and_mse(
            model, valid_loader, device=device, which_model=which_model,
            with_embedding=False)

        valid_rmse = torch.sqrt(valid_mse)
        train_rmse = torch.sqrt(train_mse)

        tr['epoch train mae'].append(train_mae.item())
        tr['epoch train acc'].append(train_acc.item())
        tr['epoch train rmse'].append(train_rmse.item())
        tr['epoch valid mae'].append(valid_mae.item())
        tr['epoch valid acc'].append(valid_acc.item())
        tr['epoch valid rmse'].append(valid_rmse.item())

        # New best validation RMSE -> update running-best stats and
        # checkpoint the model weights.
        if valid_rmse < tr['best running rmse']:
            tr['best running mae'] = valid_mae.item()
            tr['best running rmse'] = valid_rmse.item()
            tr['best running acc'] = valid_acc.item()
            tr['best running epoch'] = epoch
            torch.save(model.state_dict(),
                       os.path.join(out_path, 'best_model.pt'))

        msg = (f'MAE/RMSE/ACC: Current Valid: {valid_mae:.2f}/'
               f'{valid_rmse:.2f}/'
               f'{tr["best running acc"]:.2f} Ep. {epoch} |'
               f' Best Valid: {tr["best running mae"]:.2f}'
               f'/{tr["best running rmse"]:.2f}'
               f'/{tr["best running acc"]:.2f}'
               f' | Ep. {tr["best running epoch"]}')
        print(msg)
        with open(log_path, 'a') as fh:
            fh.write('%s\n' % msg)

        msg = f'Time elapsed: {(time.time() - start_time)/60:.2f} min'
        print(msg)
        with open(log_path, 'a') as fh:
            fh.write(f'{msg}\n')
# (large blocks of dead commented-out logging code removed)

def aftertraining_logging(model, which, info_dict, train_loader, device,
                          valid_loader, test_loader, which_model,
                          start_time=None):
    """Reload the best checkpoint and log final train/valid/test metrics.

    Loads ``best_model.pt`` from the configured output path, evaluates
    MAE/RMSE and accuracy on all three loaders, prints and appends the
    summaries to the training logfile, and stores the scalar results under
    ``info_dict['best']``.

    NOTE(review): the ``which`` parameter is accepted for backward
    compatibility but is currently ignored -- the best checkpoint is
    always loaded (the original last/best branch was commented out).
    """
    path = info_dict['settings']['output path']
    logfile = info_dict['settings']['training logfile']

    model.load_state_dict(torch.load(os.path.join(path, 'best_model.pt')))
    info_dict_key = 'best'
    log_key = 'Best '

    model.eval()
    with torch.set_grad_enabled(False):  # save memory during inference
        train_mae, train_mse = compute_mae_and_mse(
            model, train_loader, device=device, which_model=which_model,
            with_embedding=False)
        valid_mae, valid_mse = compute_mae_and_mse(
            model, valid_loader, device=device, which_model=which_model,
            with_embedding=False)
        test_mae, test_mse = compute_mae_and_mse(
            model, test_loader, device=device, which_model=which_model,
            with_embedding=False)
        train_rmse, valid_rmse = torch.sqrt(train_mse), torch.sqrt(valid_mse)
        test_rmse = torch.sqrt(test_mse)

        s = (f'MAE/RMSE: | {log_key}Train: {train_mae:.2f}/{train_rmse:.2f} '
             f'| {log_key}Valid: {valid_mae:.2f}/{valid_rmse:.2f} '
             f'| {log_key}Test: {test_mae:.2f}/{test_rmse:.2f}')
        print(s)
        with open(logfile, 'a') as f:
            f.write(f'{s}\n')

        train_acc = compute_accuracy(model, train_loader,
                                     device=device, which_model=which_model)
        valid_acc = compute_accuracy(model, valid_loader,
                                     device=device, which_model=which_model)
        test_acc = compute_accuracy(model, test_loader,
                                    device=device, which_model=which_model)

        s = (f'ACC: | {log_key}Train: {train_acc:.2f} '
             f'| {log_key}Valid: {valid_acc:.2f} '
             f'| {log_key}Test: {test_acc:.2f}')
        print(s)
        with open(logfile, 'a') as f:
            f.write(f'{s}\n')

        if start_time is not None:
            s = f'Total Running Time: {(time.time() - start_time)/60:.2f} min'
            print(s)
            with open(logfile, 'a') as f:
                f.write(f'{s}\n')

        info_dict[info_dict_key]['train mae'] = train_mae.item()
        info_dict[info_dict_key]['train rmse'] = train_rmse.item()
        info_dict[info_dict_key]['train acc'] = train_acc.item()
        info_dict[info_dict_key]['valid mae'] = valid_mae.item()
        info_dict[info_dict_key]['valid rmse'] = valid_rmse.item()
        # BUGFIX: previously stored train_acc under 'valid acc'
        # (copy-paste error); store the actual validation accuracy.
        info_dict[info_dict_key]['valid acc'] = valid_acc.item()
        info_dict[info_dict_key]['test mae'] = test_mae.item()
        info_dict[info_dict_key]['test rmse'] = test_rmse.item()
        info_dict[info_dict_key]['test acc'] = test_acc.item()
# (dead commented-out duplicate of aftertraining_logging removed)

def create_logfile(info_dict):
    """Create the training logfile and write a header of run settings.

    Each header entry is echoed to stdout and appended (with a trailing
    newline) to the file named by
    ``info_dict['settings']['training logfile']``. The file is opened in
    ``'w'`` mode, so any existing logfile is overwritten, and the buffer
    is flushed after every entry.
    """
    cfg = info_dict['settings']
    header_lines = [
        f'This script: {cfg["script"]}',
        f'PyTorch Version: {cfg["pytorch version"]}',
        f'Random seed: {cfg["random seed"]}',
        f'Learning rate: {cfg["learning rate"]}',
        f'Epochs: {cfg["num epochs"]}',
        f'Batch size: {cfg["batch size"]}',
        f'Output path: {cfg["output path"]}',
    ]
    with open(cfg['training logfile'], 'w') as fh:
        for entry in header_lines:
            print(entry)
            fh.write(f'{entry}\n')
            fh.flush()
verbose=1, scheduler=None, scheduler_on='valid_acc', which_model='categorical', with_embedding=False): start_time = time.time() if which_model == 'coral': PATH = '/Users/xintongshi/Desktop/GitHub/ordinal-conditional/src/models/mlp-for-tabular/runs/coral' elif which_model == 'niu': PATH = '/Users/xintongshi/Desktop/GitHub/ordinal-conditional/src/models/mlp-for-tabular/runs/niu' elif which_model == 'conditional': PATH = '/Users/xintongshi/Desktop/GitHub/ordinal-conditional/src/models/mlp-for-tabular/runs/conditional' else: PATH = '/Users/xintongshi/Desktop/GitHub/ordinal-conditional/src/models/mlp-for-tabular/runs/xentr' best_mae, best_mse, best_epoch = 999, 999, -1 info_dict['training'] = { 'num epochs': num_epochs, 'iter per epoch': len(train_loader), 'minibatch loss': [], 'epoch train mae': [], 'epoch train rmse': [], 'epoch train acc': [], 'epoch valid mae': [], 'epoch valid rmse': [], 'epoch valid acc': [], 'best running mae': np.infty, 'best running rmse': np.infty, 'best running acc': 0., 'best running epoch': -1 } for epoch in range(1,num_epochs+1): model.train() for batch_idx, (features, targets) in enumerate(train_loader): if which_model != 'categorical': levels = levels_from_labelbatch(targets, num_classes=model.num_classes) if with_embedding: features, residues = features features = features.to(device) residues = residues.to(device) targets = targets.to(device) if which_model != 'categorical': logits, probas = model(features, residues) else: logits = model(features, residues) else: features = features.to(device) targets = targets.to(device) if which_model == 'categorical': logits = model(features) else: logits, probas = model(features) if which_model == 'niu': loss = niu_loss(logits, levels) elif which_model == 'coral': loss = coral_loss(logits, levels) elif which_model == 'categorical': loss = torch.nn.functional.cross_entropy(logits, targets) elif which_model == 'conditional': loss = conditional_loss(logits, targets, num_classes=model.num_classes) elif 
which_model == 'conditional-ablation': loss = conditional_loss_ablation(logits, targets, num_classes=model.num_classes) else: raise ValueError('This which_model choice is not supported.') optimizer.zero_grad() loss.backward() # ## UPDATE MODEL PARAMETERS optimizer.step() # ## LOGGING iteration_logging(info_dict=info_dict, batch_idx=batch_idx, loss=loss, train_dataset=train_loader.dataset, frequency=50, epoch=epoch) # if verbose: # if not batch_idx % logging_interval: # print(f'Epoch: {epoch+1:03d}/{num_epochs:03d} ' # f'| Batch {batch_idx:04d}/{len(train_loader):04d} ' # f'| Loss: {loss:.4f}') #epoch logging epoch_logging(info_dict=info_dict, model=model, train_loader=train_loader, device=device, valid_loader=valid_loader, which_model=which_model, loss=loss, epoch=epoch, start_time=start_time) # model.eval() # with torch.no_grad(): # save memory during inference # train_acc = compute_accuracy( # model, train_loader, device=device, # with_embedding=with_embedding, # which_model=which_model) # valid_acc = compute_accuracy( # model, valid_loader, device=device, # with_embedding=with_embedding, # which_model=which_model) # train_mae,train_mse = compute_mae_and_mse(model, train_loader, device=device, which_model=which_model, # with_embedding=with_embedding) # valid_mae,valid_mse = compute_mae_and_mse(model, valid_loader, device=device, which_model=which_model, # with_embedding=with_embedding) # if valid_mae < best_mae: # best_mae, best_mse, best_epoch = valid_mae, valid_mse, epoch # torch.save(model.state_dict(), os.path.join(PATH,'best_model.pt')) # if verbose: # print(f'Epoch: {epoch+1:03d}/{num_epochs:03d} ' # f'| Train: {train_acc :.2f}% ' # f'| Validation: {valid_acc :.2f}%') # print('MAE/RMSE: | Current Valid: %.2f/%.2f Ep. %d | Best Valid : %.2f/%.2f Ep. 
%d' % ( # valid_mae, valid_mse, epoch, best_mae, best_mse, best_epoch)) # train_acc_list.append(train_acc.item()) # valid_acc_list.append(valid_acc.item()) # elapsed = (time.time() - start_time)/60 # if verbose: # print(f'Time elapsed: {elapsed:.2f} min') if scheduler is not None: # if scheduler_on == 'valid_acc': # scheduler.step(valid_acc_list[-1]) # elif scheduler_on == 'minibatch_loss': # scheduler.step(minibatch_loss_list[-1]) # else: #
level (>=14) first.') sys.exit(3) # Check ant install ant_path = Which('ant') if ant_path is None: print('failed\nAnt could not be found. Please make sure it is installed.') sys.exit(4) print('ok') def MakeApk(options, app_info, manifest): CheckSystemRequirements() Customize(options, app_info, manifest) name = app_info.android_name app_dir = os.path.join(tempfile.gettempdir(), name) packaged_archs = [] if options.mode == 'shared': # For shared mode, it's not necessary to use the whole xwalk core library, # use xwalk_core_library_java_app_part.jar from it is enough. java_app_part_jar = os.path.join(xwalk_dir, 'xwalk_core_library', 'libs', 'xwalk_core_library_java_app_part.jar') shutil.copy(java_app_part_jar, os.path.join(app_dir, 'libs')) Execution(options, name) elif options.mode == 'embedded': # Copy xwalk_core_library into app folder and move the native libraries # out. # When making apk for specified CPU arch, will only include the # corresponding native library by copying it back into xwalk_core_library. target_library_path = os.path.join(app_dir, 'xwalk_core_library') shutil.copytree(os.path.join(xwalk_dir, 'xwalk_core_library'), target_library_path) library_lib_path = os.path.join(target_library_path, 'libs') native_lib_path = os.path.join(app_dir, 'native_libs') os.makedirs(native_lib_path) available_archs = [] for dir_name in os.listdir(library_lib_path): lib_dir = os.path.join(library_lib_path, dir_name) if ContainsNativeLibrary(lib_dir): shutil.move(lib_dir, os.path.join(native_lib_path, dir_name)) available_archs.append(dir_name) if options.arch: Execution(options, name) packaged_archs.append(options.arch) else: # If the arch option is unspecified, all of available platform APKs # will be generated. 
valid_archs = ['x86', 'armeabi-v7a'] for arch in valid_archs: if arch in available_archs: if arch.find('x86') != -1: options.arch = 'x86' elif arch.find('arm') != -1: options.arch = 'arm' Execution(options, name) packaged_archs.append(options.arch) else: print('Warning: failed to create package for arch "%s" ' 'due to missing native library' % arch) if len(packaged_archs) == 0: print('No packages created, aborting') sys.exit(13) # if project_dir, save build directory if options.project_dir: print ('\nCreating project directory') save_dir = os.path.join(options.project_dir, name) if CreateAndCopyDir(app_dir, save_dir, True): print (' A project directory was created successfully in:\n %s' % os.path.abspath(save_dir)) print (' To manually generate an APK, run the following in that ' 'directory:') print (' ant release -f build.xml') print (' For more information, see:\n' ' http://developer.android.com/tools/building/' 'building-cmdline.html') else: print ('Error: Unable to create a project directory during the build. ' 'Please check the directory passed in --project-dir, ' 'available disk space, and write permission.') if not options.project_only: PrintPackageInfo(options, name, packaged_archs) def main(argv): parser = optparse.OptionParser() parser.add_option('-v', '--version', action='store_true', dest='version', default=False, help='The version of this python tool.') parser.add_option('--verbose', action="store_true", dest='verbose', default=False, help='Print debug messages.') info = ('The packaging mode of the web application. The value \'shared\' ' 'means that the runtime is shared across multiple application ' 'instances and that the runtime needs to be distributed separately. ' 'The value \'embedded\' means that the runtime is embedded into the ' 'application itself and distributed along with it.' 'Set the default mode as \'embedded\'. 
For example: --mode=embedded') parser.add_option('--mode', choices=('embedded', 'shared'), default='embedded', help=info) info = ('The target architecture of the embedded runtime. Supported values ' 'are \'x86\' and \'arm\'. Note, if undefined, APKs for all possible ' 'architestures will be generated.') parser.add_option('--arch', choices=AllArchitectures(), help=info) group = optparse.OptionGroup(parser, 'Application Source Options', 'This packaging tool supports 3 kinds of web application source: ' '1) XPK package; 2) manifest.json; 3) various command line options, ' 'for example, \'--app-url\' for website, \'--app-root\' and ' '\'--app-local-path\' for local web application.') info = ('The path of the XPK package. For example, --xpk=/path/to/xpk/file') group.add_option('--xpk', help=info) info = ('The manifest file with the detail description of the application. ' 'For example, --manifest=/path/to/your/manifest/file') group.add_option('--manifest', help=info) info = ('The url of application. ' 'This flag allows to package website as apk. For example, ' '--app-url=http://www.intel.com') group.add_option('--app-url', help=info) info = ('The root path of the web app. ' 'This flag allows to package local web app as apk. For example, ' '--app-root=/root/path/of/the/web/app') group.add_option('--app-root', help=info) info = ('The relative path of entry file based on the value from ' '\'app_root\'. This flag should work with \'--app-root\' together. ' 'For example, --app-local-path=/relative/path/of/entry/file') group.add_option('--app-local-path', help=info) parser.add_option_group(group) # Mandatory options group group = optparse.OptionGroup(parser, 'Mandatory arguments', 'They are used for describing the APK information through ' 'command line options.') info = ('The apk name. For example, --name="Your Application Name"') group.add_option('--name', help=info) info = ('The package name. 
For example, ' '--package=com.example.YourPackage') group.add_option('--package', help=info) parser.add_option_group(group) # Optional options group (alphabetical) group = optparse.OptionGroup(parser, 'Optional arguments', 'They are used for various settings for applications through ' 'command line options.') info = ('The version name of the application. ' 'For example, --app-version=1.0.0') group.add_option('--app-version', help=info) info = ('The version code of the application. ' 'For example, --app-versionCode=24') group.add_option('--app-versionCode', type='int', help=info) info = ('The version code base of the application. Version code will ' 'be made by adding a prefix based on architecture to the version ' 'code base. For example, --app-versionCodeBase=24') group.add_option('--app-versionCodeBase', type='int', help=info) info = ('The description of the application. For example, ' '--description=YourApplicationDescription') group.add_option('--description', help=info) group.add_option('--enable-remote-debugging', action='store_true', dest='enable_remote_debugging', default=False, help='Enable remote debugging.') group.add_option('--use-animatable-view', action='store_true', dest='use_animatable_view', default=False, help='Enable using animatable view (TextureView).') info = ('The list of external extension paths splitted by OS separators. ' 'The separators are \':\' , \';\' and \':\' on Linux, Windows and ' 'Mac OS respectively. For example, ' '--extensions=/path/to/extension1:/path/to/extension2.') group.add_option('--extensions', help=info) group.add_option('-f', '--fullscreen', action='store_true', dest='fullscreen', default=False, help='Make application fullscreen.') group.add_option('--keep-screen-on', action='store_true', default=False, help='Support keeping screen on') info = ('The path of application icon. 
' 'Such as: --icon=/path/to/your/customized/icon') group.add_option('--icon', help=info) info = ('The orientation of the web app\'s display on the device. ' 'For example, --orientation=landscape. The default value is ' '\'unspecified\'. The permitted values are from Android: ' 'http://developer.android.com/guide/topics/manifest/' 'activity-element.html#screen') group.add_option('--orientation', help=info) info = ('The list of permissions to be used by web application. For example, ' '--permissions=geolocation:webgl') group.add_option('--permissions', help=info) info = ('Create an Android project directory with Crosswalk at this location.' ' (See project-only option below)') group.add_option('--project-dir', help=info) info = ('Must be used with project-dir option. Create an Android project ' 'directory with Crosswalk but do not build the APK package') group.add_option('--project-only', action='store_true', default=False, dest='project_only', help=info) info = ('Packaging tool will move the output APKs to the target directory') group.add_option('--target-dir', default=os.getcwd(), help=info) info = ('Use command lines.' 'Crosswalk is powered by Chromium and supports Chromium command line.' 'For example, ' '--xwalk-command-line=\'--chromium-command-1 --xwalk-command-2\'') group.add_option('--xwalk-command-line', default='', help=info) parser.add_option_group(group) # Keystore options group group = optparse.OptionGroup(parser, 'Keystore Options', 'The keystore is a signature from web developer, it\'s used when ' 'developer wants to distribute the applications.') info = ('The path to the developer keystore. For example, ' '--keystore-path=/path/to/your/developer/keystore') group.add_option('--keystore-path', help=info) info = ('The alias name of keystore. For example, --keystore-alias=name') group.add_option('--keystore-alias', help=info) info = ('The passcode of keystore. 
For example, --keystore-passcode=code') group.add_option('--keystore-passcode', help=info) info = ('Passcode for alias\'s private key in the keystore, ' 'For example, --keystore-alias-passcode=alias-code') group.add_option('--keystore-alias-passcode', help=info) info = ('Minify and obfuscate javascript and css.' '--compressor: compress javascript and css.' '--compressor=js: compress javascript.' '--compressor=css: compress css.') group.add_option('--compressor', dest='compressor', action='callback', callback=ParseParameterForCompressor, type='string', nargs=0, help=info) parser.add_option_group(group) options, _ = parser.parse_args() if len(argv) == 1: parser.print_help() return 0 if options.version: if os.path.isfile('VERSION'): print(GetVersion('VERSION')) return 0 else: parser.error('VERSION was not found, so Crosswalk\'s version could not ' 'be determined.') xpk_temp_dir = '' if options.xpk: xpk_name = os.path.splitext(os.path.basename(options.xpk))[0] xpk_temp_dir = os.path.join(tempfile.gettempdir(), xpk_name + '_xpk') CleanDir(xpk_temp_dir) ParseXPK(options, xpk_temp_dir) if options.app_root and not options.manifest: manifest_path = os.path.join(options.app_root, 'manifest.json') if os.path.exists(manifest_path): print('Using manifest.json distributed with the application.') options.manifest = manifest_path app_info = AppInfo() manifest = None if not options.manifest: # The checks here are really convoluted, but at the moment make_apk # misbehaves any of the following conditions is true. if options.app_url: # 1) --app-url must be passed without either --app-local-path or # --app-root. if options.app_root or options.app_local_path: parser.error('You must pass either "--app-url" or "--app-local-path" ' 'with "--app-root", but not all.') else: # 2) --app-url is not passed but only one of --app-local-path and # --app-root is set. 
if bool(options.app_root) != bool(options.app_local_path): parser.error('You must specify both "--app-local-path" and ' '"--app-root".') # 3) None of --app-url, --app-local-path and --app-root are passed. elif not options.app_root and not options.app_local_path: parser.error('You must pass either "--app-url" or "--app-local-path" ' 'with "--app-root".') if options.permissions: permission_list = options.permissions.split(':') else: print('Warning: all supported permissions on Android port are added. ' 'Refer to https://github.com/crosswalk-project/' 'crosswalk-website/wiki/Crosswalk-manifest') permission_list = permission_mapping_table.keys() options.permissions = HandlePermissionList(permission_list) options.icon_dict = {} else: try: manifest = ParseManifest(options) except
resave_all(self, without_mtime=False): self.save(without_mtime=without_mtime, no_propagate=True) for mc in self.meaningcontext_set.all(): mc.resave_all(without_mtime=without_mtime) for ex in self.examples: ex.resave_all(without_mtime=without_mtime) for m in self.child_meanings: m.resave_all(without_mtime=without_mtime) for cg in self.collogroups: cg.resave_all(without_mtime=without_mtime) def save(self, without_mtime=False, no_propagate=False, *args, **kwargs): self.substantivus_csl = apply_to_mixed(antconc_anticorrupt, self.substantivus_csl, CIVIL_IN_CSL_APPLY_TO_CSL) host_entry = self.host_entry if host_entry is not None: self.volume = host_entry.volume if self.looks_like_valency(host_entry): if self.gloss.strip() and not self.meaning.strip(): #::AUHACK:: self.meaning = self.gloss self.gloss = '' self.is_valency = True host = self.host if (self.numex > 0 and isinstance(host, CollocationGroup) and not host.phraseological and host_entry.template_version > 1 and not host_entry.restricted_use): self.numex = -self.numex # NOTE::INVNUMEX:: Приводим # к противоположному значению, чтобы иметь возможность вернуть # прежнее значение разрешенных примеров, если условия перестанут # соблюдаться. 
if (self.numex < 0 and ( not isinstance(host, CollocationGroup) or host.phraseological or host_entry.template_version < 2 or host_entry.restricted_use)): self.numex = -self.numex #::INVNUMEX:: super(Meaning, self).save(*args, **kwargs) if without_mtime: return if host_entry is not None and not no_propagate: host_entry.save(without_mtime=without_mtime) def delete(self, without_mtime=False, *args, **kwargs): super(Meaning, self).delete(*args, **kwargs) if without_mtime: return host_entry = self.host_entry if host_entry is not None: host_entry.save(without_mtime=without_mtime) def make_double(self, **kwargs): with transaction.atomic(): id1 = self.pk m2 = self m2.pk = None if 'entry' in kwargs and 'collogroup' not in kwargs: m2.entry_container = kwargs['entry'] if 'collogroup' in kwargs: m2.collogroup_container = kwargs['collogroup'] m2.save() m1 = Meaning.objects.get(pk=id1) m2m = ('meaningcontext_set', 'example_set', 'collocationgroup_set') kwargs['meaning'] = m2 return _double_check(m1, m2, m2m=m2m, kwargs=kwargs, model=Meaning) return None, None def get_url_fragment(self): return 'm{0}'.format(self.id) def __str__(self): return self.meaning def forJSON(self): _fields = ( 'additional_info', 'collogroup_container_id', 'entry_container_id', 'figurative', 'gloss', 'hidden', 'id', 'meaning', 'metaphorical', 'numex', 'order', 'parent_meaning_id', 'substantivus', 'substantivus_type', ) dct = dict((key, self.__dict__[key]) for key in _fields) dct['contexts'] = [c.forJSON() for c in self.meaningcontext_set.all()] dct['collogroups'] = [c.forJSON() for c in self.collogroups] dct['meanings'] = [m.forJSON() for m in self.child_meanings] dct['examples'] = [e.forJSON() for e in self.examples] return dct class Meta: verbose_name = 'значение' verbose_name_plural = 'ЗНАЧЕНИЯ' ordering = ('id',) class Example(models.Model, JSONSerializable, VolumeAttributive): meaning = ForeignKey(Meaning, verbose_name='значение', help_text='Значение, к которому относится данный пример.', blank=True, 
null=True, on_delete=models.SET_NULL) entry = ForeignKey(Entry, blank=True, null=True, on_delete=models.CASCADE) collogroup = ForeignKey('CollocationGroup', blank=True, null=True, on_delete=models.SET_NULL) order = SmallIntegerField('порядок следования', blank=True, default=345) hidden = BooleanField('Скрыть пример', default=False, editable=False, help_text='Не отображать данный пример при выводе словарной статьи.') dont_lowercase = BooleanField('Не менять регистр символов', help_text='''Не понижать регистр символов. При сохранении у всех примеров кроме использующих данный флаг автоматически заглавные прописные буквы заменяются на строчные. Данный флаг разрешено использовать только в статьях, описывающих единичные буквы.''', default=False, editable=False) wordform_example = BooleanField( 'Грамматическая/иная особенность', default=False) example = TextField('пример') ts_example = TextField(default='') @property def example_ucs(self): return ucs8(self.example) context = TextField('широкий контекст', help_text='Более широкий контекст для примера', blank=True) @property def context_ucs(self): c = self.context e = ucs8(self.example) if c: c = ucs8(c) x, y, z = c.partition(e) if y: # Разбиение дало положительный результат, # в "y" помещён сам пример. return (x, y, z) return ('', e, '') address_text = CharField('адрес', max_length=300, blank=True) @property def greek_equivs(self): return self.greq_set.all().order_by('order', 'id') @property def translations(self): return self.translation_set.all().order_by('order', 'id') def greek_equivs_with_numbers(self, show_info=False): # Если не надо отображать авторские комментарии, то выводим # только реальные греч. параллели с заполненным полем unitext # либо с пометой "в греч. иначе", остальные пропускаем. 
if show_info: lst = list(self.greek_equivs) else: lst = [ge for ge in self.greek_equivs if ge.unitext.strip() or ge.aliud] L = len(lst) if L == 0: groups = [] elif L == 1: groups = [(lst[0], 1)] else: groups = [] ge_prev = lst[0] n = 1 for ge in lst[1:]: if ge.unitext == ge_prev.unitext or ge.aliud and ge_prev.aliud: n += 1 else: groups.append((ge_prev, n)) ge_prev = ge n = 1 groups.append((ge_prev, n)) assert sum(x[1] for x in groups) == L, 'Число параллелей д.б постоянным' return groups audited = BooleanField('Пример прошел проверку или взят на проверку', default=False) audited_time = DateTimeField('Когда пример был проверен', blank=True, editable=False, null=True) note = TextField('комментарий', help_text='''Дополнительная информация по данному примеру, которая будет видна рядовому пользователю словаря''', blank=True) additional_info = TextField('примечание', help_text='''Любая дополнительная информация по данному ПРИМЕРУ. Дополнительная информация по значению или лексеме указывается не здесь, а в аналогичных полях при значении и лексеме, соответственно.''', blank=True) greek_eq_status = CharField('параллели', max_length=1, choices=constants.GREEK_EQ_STATUS, default=constants.GREEK_EQ_LOOK_FOR) mtime = DateTimeField(editable=False, auto_now=True) volume = SmallIntegerField('том', choices=constants.VOLUME_CHOICES, blank=True, null=True) @property def host_entry(self): if self.entry: return self.entry else: try: host_entry = self.meaning.host_entry except: return None else: return host_entry @property def host(self): if self.collogroup: return self.collogroup else: if self.meaning: return self.meaning.host else: return self.entry def starts_with(self, starts_with=constants.ANY_LETTER): host_entry = self.host_entry if host_entry: return host_entry.starts_with(starts_with) return False def example_for_admin(self): text = '' return mark_safe(text) def ts_convert(self): RE = re.compile( '[^' 'абвгдеєжѕзийіклмноѻпрстѹꙋуфхѿцчшщъыьѣюꙗѡѽѧѯѱѳѵ' 
'АБВГДЕЄЖЗЅИЙІКЛМНОѺПРСТѸꙊУФХѾЦЧШЩЪЫЬѢЮꙖѠѼѦѮѰѲѴ' r'\~\'\`\^ı' ']+') ts_text = '' for word in re.split(RE, self.example): ts_word = word[:1].lower() if len(word) > 2: ts_word += word[1:-1] if len(word) > 1 and word[-1].lower() != 'ъ': ts_word += word[-1] ts_text += ts_word self.ts_example = civilrus_convert(ts_text) def angle_brackets(self): """ Унификация разных вариантов угловых скобок """ self.example = re.sub('[<\u3008\u2329\u29fc\u276c\u2770\u276e\uff1c]', '\u27e8', self.example) self.example = re.sub('[>\u3009\u232a\u29fd\u276d\u2771\u276f\uff1e]', '\u27e9', self.example) def lowercase_if_necessary(self): if self.dont_lowercase: return r = ( r'(?:^|(?<=[^' r'абвгдеєжѕзийіıклмноѻпрстѹꙋуфхѿцчшщъыьѣюꙗѡѽѧѯѱѳѵ' r'АБВГДЕЄЖЗЅИЙІКЛМНОѺПРСТѸУꙊФХѾЦЧШЩЪЫЬѢЮꙖѠѼѦѮѰѲѴ~' r"\^'`" r']))' r'([АБВГДЕЄѢЖЗЅИІѴЙКѮЛМНОѺѠѼѾПѰРСТУꙊѸФѲХЦЧШЩЪЫЬЮꙖѦ])' ) segments = re.split(r, self.example) if len(segments) > 1: example = '' for i, s in enumerate(segments): if i % 2 == 0: example += s elif s == 'Е': example += 'є' else: example += s.lower() self.example = example def get_url_fragment(self): return 'ex{0}'.format(self.id) def resave_all(self, without_mtime=False): self.save(without_mtime=without_mtime, no_propagate=True) for t in self.translation_set.all(): t.resave_all(without_mtime=without_mtime) for ge in self.greek_equivs: ge.resave_all(without_mtime=without_mtime) def save(self, without_mtime=False, no_propagate=False, *args, **kwargs): self.lowercase_if_necessary() self.angle_brackets() self.ts_convert() host_entry = self.host_entry if host_entry is not None: self.volume = host_entry.volume self.entry = host_entry host = self.host if host and 'base_meaning_id' in host.__dict__: self.collogroup = host super(Example, self).save(*args, **kwargs) if without_mtime: return host_entry = self.host_entry if host_entry is not None and not no_propagate: host_entry.save(without_mtime=without_mtime) if not self.address_text.strip() and len(self.example) < 10: # Отслеживаем странные случаи, когда 
в базе возникают примеры без # адреса и с текстом примера из нескольких повторяющихся букв # наподобие "ооо", "нннн". user = get_current_user() logger.error( '<Example id: %s, text: "%s">, ' 'is corrupted during <User: %s>´s session: ' '<Host Object: %s %s>, ' '<Host Entry id: %s, headword: %s>' % ( self.id, self.example, user and user.last_name or 'No current user', host.__class__.__name__, host.id, host_entry.id, host_entry.civil_equivalent)) def delete(self, without_mtime=False, *args, **kwargs): super(Example, self).delete(*args, **kwargs) if without_mtime: return host_entry = self.host_entry if host_entry is not None: host_entry.save(without_mtime=without_mtime) def make_double(self, **kwargs): with transaction.atomic(): id1 = self.pk ex2 = self ex2.pk = None if 'meaning' in kwargs: ex2.meaning = kwargs['meaning'] if 'entry' in kwargs: ex2.entry = kwargs['entry'] if 'collogroup' in kwargs: ex2.collogroup = kwargs['collogroup'] ex2.save() ex1 = Example.objects.get(pk=id1) m2m = ('greq_set', 'translation_set') kwargs['example'] = ex2 return _double_check(ex1, ex2, m2m=m2m, kwargs=kwargs, model=Example) return None, None def forJSON(self): _fields = ( 'additional_info', 'address_text', 'collogroup_id', 'context', 'dont_lowercase', 'entry_id', 'example', 'greek_eq_status', 'hidden', 'id', 'meaning_id', 'note', 'order', 'wordform_example', ) dct = dict((key, self.__dict__[key]) for key in _fields) dct['greqs'] = [ge.forJSON() for ge in self.greek_equivs] dct['translations'] = [t.forJSON() for t in self.translations] return dct def forHellinistJSON(self): data = { 'id': self.id, 'triplet': self.context_ucs, 'antconc': self.context.strip() or self.example, 'example': self.example, 'address': self.address_text, 'status': self.greek_eq_status, 'audited': self.audited_time and self.audited, 'comment': self.additional_info, 'greqs': [greq.forJSON() for greq in self.greek_equivs], } return data def toHellinistJSON(self): return json.dumps(self.forHellinistJSON(), 
ensure_ascii=False, separators=(',', ':')) def get_translations(self, fragmented, hidden): translations = self.translation_set.filter(fragmented=fragmented, hidden=hidden) if fragmented: translations = translations.order_by('fragment_end', 'order', 'id') data = defaultdict(list) for t in translations: if t.translation.strip(): data[t.fragment_end].append(t) else: data = tuple(t for t in translations.order_by('order', 'id') if t.translation.strip()) return data def __str__(self): return '(%s) %s' % (self.address_text, self.example) class Meta: verbose_name = 'пример' verbose_name_plural = 'ПРИМЕРЫ' ordering = ('id',) class Translation(models.Model, JSONSerializable, VolumeAttributive): for_example = ForeignKey(Example, related_name='translation_set', on_delete=models.CASCADE) fragmented = BooleanField('перевод только части примера', default=False) fragment_start = SmallIntegerField('номер слова начала фрагмента', blank=True, default=1) fragment_end = SmallIntegerField('номер слова конца фрагмента', blank=True, default=1000) source = CharField('Источник', max_length=1, choices=constants.TRANSLATION_SOURCE_CHOICES, default=constants.TRANSLATION_SOURCE_DEFAULT) order = SmallIntegerField('порядок следования', blank=True, default=345) hidden = BooleanField('скрывать перевод', default=True, help_text='отображать перевод только в комментариях для авторов') translation = TextField('перевод') additional_info = TextField('примечание', blank=True) volume = SmallIntegerField('том', choices=constants.VOLUME_CHOICES, blank=True, null=True) def source_label(self): return constants.TRANSLATION_SOURCE_TEXT.get(self.source, '') @property def translation_fu(self): return '%s%s' % (self.translation[0:1].upper(), self.translation[1:]) @property def host_entry(self): try: host_entry = self.for_example.host_entry except: return
<reponame>leonardt/veriloggen from __future__ import absolute_import from __future__ import print_function import sys import os import collections import tempfile import veriloggen.core.vtypes as vtypes import veriloggen.core.module as module import veriloggen.core.function as function import veriloggen.core.task as task import pyverilog.vparser.ast as vast from pyverilog.vparser.parser import VerilogCodeParser from pyverilog.dataflow.modulevisitor import ModuleVisitor from pyverilog.ast_code_generator.codegen import ASTCodeGenerator #------------------------------------------------------------------------- # User interfaces to read Verilog source code #------------------------------------------------------------------------- def read_verilog_stubmodule(*filelist, **opt): module_dict = to_module_dict(*filelist, **opt) codegen = ASTCodeGenerator() stubs = collections.OrderedDict() for name, m in module_dict.items(): description = vast.Description((m,)) source = vast.Source('', description) code = codegen.visit(source) stubs[name] = module.StubModule(name, code=code) return stubs def read_verilog_module(*filelist, **opt): module_dict = to_module_dict(*filelist, **opt) visitor = VerilogReadVisitor(module_dict) for name, m in module_dict.items(): visitor.visit(m) modules = visitor.converted_modules return modules def read_verilog_module_str(code, encode='utf-8'): tmp = tempfile.NamedTemporaryFile() tmp.write(code.encode(encode)) tmp.read() filename = tmp.name ret = read_verilog_module(filename) tmp.close() return ret def read_verilog_stubmodule_str(code, encode='utf-8'): tmp = tempfile.NamedTemporaryFile() tmp.write(code.encode(encode)) tmp.read() filename = tmp.name ret = read_verilog_stubmodule(filename) tmp.close() return ret #------------------------------------------------------------------------- def to_module_dict(*filelist, **opt): ast = to_ast(*filelist, **opt) module_visitor = ModuleVisitor() module_visitor.visit(ast) module_names = 
module_visitor.get_modulenames() moduleinfotable = module_visitor.get_moduleinfotable() moduleinfo = moduleinfotable.getDefinitions() module_dict = collections.OrderedDict( [(n, d.definition) for n, d in moduleinfo.items()]) return module_dict #------------------------------------------------------------------------- def to_ast(*filelist, **opt): include = opt['include'] if 'include' in opt else () define = opt['define'] if 'define' in opt else () if not isinstance(include, tuple) and not isinstance(include, list): raise TypeError('"include" option of read_verilog must be tuple or list, not %s' % type(include)) if not isinstance(include, tuple) and not isinstance(include, list): raise TypeError('"include" option of read_verilog must be tuple or list, not %s' % type(include)) code_parser = VerilogCodeParser(filelist, preprocess_include=include, preprocess_define=define) ast = code_parser.parse() return ast #------------------------------------------------------------------------- def to_tuple(s): if not isinstance(s, (list, tuple)): return tuple([s]) return s #------------------------------------------------------------------------- class ReadOnlyModule(object): def __init__(self, m): self.m = m def __getattr__(self, attr): return getattr(self.m, attr) #------------------------------------------------------------------------- class VerilogReadVisitor(object): def __init__(self, ast_module_dict, converted_modules=None): self.ast_module_dict = ast_module_dict self.converted_modules = (collections.OrderedDict() if converted_modules is None else converted_modules) self.m = None self.module_stack = [] def get_module(self, name): if name in self.converted_modules: return self.converted_modules[name] if name not in self.ast_module_dict: return module.StubModule(name) visitor = VerilogReadVisitor( self.ast_module_dict, self.converted_modules) mod = visitor.visit(self.ast_module_dict[name]) self.converted_modules[name] = mod 
self.converted_modules.update(visitor.converted_modules) return mod def push_module(self, m): self.module_stack.append(self.m) self.m = m def push_read_only_module(self): self.push_module(ReadOnlyModule(self.m)) def pop_module(self): self.m = self.module_stack.pop() def add_object(self, obj): if isinstance(self.m, module.Module): self.m.add_object(obj) if isinstance(obj, vtypes._Variable): obj.module = self.m def generic_visit(self, node): for c in node.children(): self.visit(c) #raise TypeError("Unsupported object '%s'" % str(type(node))) def visit(self, node): method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) return visitor(node) def visit_ModuleDef(self, node): # check module cache if node.name in self.converted_modules: return self.converted_modules[node.name] # create new Verilog module m = module.Module(node.name) self.push_module(m) self.generic_visit(node) self.pop_module() self.converted_modules[node.name] = m return m def visit_Paramlist(self, node): params = [] for param in node.params: p = self.visit(param) params.append(p) return params def visit_Portlist(self, node): ports = [] for port in node.ports: p = self.visit(port) ports.append(p) return ports def visit_Port(self, node): if node.type is None: name = node.name p = vtypes.AnyType(name=name) self.add_object(p) return p name = node.name width = self.visit(node.width) if node.width is not None else None _type = getattr(vtypes, node.type, None) if _type is None: raise TypeError("No such port type '%s'" % node.type) p = _type(name, width) self.add_object(p) return p def visit_Width(self, node): msb = self.visit(node.msb) width = msb + 1 return width def visit_Length(self, node): lsb = self.visit(node.lsb) length = lsb + 1 return length def visit_Identifier(self, node): if node.scope is not None: labels = self.visit(node.scope) labels.append(node.name) return vtypes.Scope(*labels) if not isinstance(self.m, (module.Module, ReadOnlyModule)): return 
vtypes.AnyType(name=node.name) ret = self.m.find_identifier(node.name) if ret is None: return vtypes.AnyType(name=node.name) if ret.name in self.m.variable: return self.m.variable[ret.name] return ret def visit_IntConst(self, node): return vtypes.Int(node.value) def visit_FloatConst(self, node): return vtypes.Float(node.value) def visit_StringConst(self, node): return vtypes.Str(node.value) def visit_Input(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Input(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Output(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Output(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Inout(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Inout(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Tri(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Tri(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Wire(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Wire(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def 
visit_Reg(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Reg(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_WireArray(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None length = self.visit(node.length) signed = node.signed obj = vtypes.Wire(width, length=length, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) obj._set_raw_length(self.visit(node.length.msb), self.visit(node.length.lsb)) self.add_object(obj) return obj def visit_RegArray(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None length = self.visit(node.length) signed = node.signed obj = vtypes.Reg(width, length=length, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) obj._set_raw_length(self.visit(node.length.msb), self.visit(node.length.lsb)) self.add_object(obj) return obj def visit_Integer(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None signed = node.signed obj = vtypes.Integer(width, signed=signed, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Real(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None obj = vtypes.Real(width, name=name) if node.width is not None: obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Genvar(self, node): name = node.name width = self.visit(node.width) if node.width is not None else None obj = vtypes.Genvar(width, name=name) if node.width is not None: 
obj._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(obj) return obj def visit_Ioport(self, node): first = self.visit(node.first) second = self.visit(node.second) if node.second is not None else None return (first, second) def visit_Parameter(self, node): name = node.name value = self.visit(node.value) width = self.visit(node.width) if node.width is not None else None signed = node.signed param = vtypes.Parameter(value, width, signed, name=name) if node.width is not None: param._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(param) return param def visit_Localparam(self, node): name = node.name value = self.visit(node.value) width = self.visit(node.width) if node.width is not None else None signed = node.signed param = vtypes.Localparam(value, width, signed, name=name) if node.width is not None: param._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(param) return param def visit_Supply(self, node): name = node.name value = self.visit(node.value) width = self.visit(node.width) if node.width is not None else None signed = node.signed param = vtypes.Supply(value, width, signed, name=name) if node.width is not None: param._set_raw_width(self.visit(node.width.msb), self.visit(node.width.lsb)) self.add_object(param) return param def visit_Decl(self, node): decl = [self.visit(d) for d in node.list] return decl def visit_Concat(self, node): vars = [self.visit(var) for var in node.list] return vtypes.Cat(*vars) def visit_LConcat(self, node): vars = [self.visit(var) for var in node.list] return vtypes.Cat(*vars) def visit_Repeat(self, node): var = self.visit(node.value) times = self.visit(node.times) return vtypes.Repeat(var, times) def visit_Partselect(self, node): var = self.visit(node.var) msb = self.visit(node.msb) lsb = self.visit(node.lsb) return vtypes.Slice(var, msb, lsb) def visit_Pointer(self, node): var = self.visit(node.var) pos = 
self.visit(node.ptr) return vtypes.Pointer(var, pos) def visit_Lvalue(self, node): return self.visit(node.var) def visit_Rvalue(self, node): return self.visit(node.var) def visit_Uplus(self, node): return vtypes.Uplus(self.visit(node.right)) def visit_Uminus(self, node): return vtypes.Uminus(self.visit(node.right)) def visit_Ulnot(self, node): return vtypes.Ulnot(self.visit(node.right)) def visit_Unot(self, node): return vtypes.Unot(self.visit(node.right)) def visit_Uand(self, node): return vtypes.Uand(self.visit(node.right)) def visit_Unand(self, node): return vtypes.Unand(self.visit(node.right)) def visit_Uor(self, node): return vtypes.Uor(self.visit(node.right)) def visit_Unor(self, node): return vtypes.Unor(self.visit(node.right)) def visit_Uxor(self, node): return vtypes.Uxor(self.visit(node.right)) def visit_Uxnor(self, node): return vtypes.Uxnor(self.visit(node.right)) def visit_Power(self, node): left = self.visit(node.left) right = self.visit(node.right) return vtypes.Power(left, right) def visit_Times(self, node): left = self.visit(node.left) right = self.visit(node.right) return vtypes.Times(left, right) def visit_Divide(self, node): left = self.visit(node.left) right = self.visit(node.right) return vtypes.Divide(left, right) def visit_Mod(self, node): left = self.visit(node.left) right = self.visit(node.right) return vtypes.Mod(left, right) def visit_Plus(self, node): left = self.visit(node.left) right = self.visit(node.right) return vtypes.Plus(left, right) def visit_Minus(self, node): left = self.visit(node.left) right = self.visit(node.right) return vtypes.Minus(left, right) def visit_Sll(self, node):
<gh_stars>0 """ Defines the ComposedPOVM class """ #*************************************************************************************************** # Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** import collections as _collections from pygsti.modelmembers.povms.composedeffect import ComposedPOVMEffect as _ComposedPOVMEffect from pygsti.modelmembers.povms.computationalpovm import ComputationalBasisPOVM as _ComputationalBasisPOVM from pygsti.modelmembers.povms.povm import POVM as _POVM from pygsti.modelmembers import modelmember as _mm from pygsti.modelmembers import operations as _op from pygsti.baseobjs import Basis as _Basis class ComposedPOVM(_POVM): """ TODO: update docstring A POVM that is effectively a *single* Lindblad-parameterized gate followed by a computational-basis POVM. Parameters ---------- errormap : MapOperator The error generator action and parameterization, encapsulated in a gate object. Usually a :class:`LindbladOp` or :class:`ComposedOp` object. (This argument is *not* copied, to allow ComposedPOVMEffects to share error generator parameters with other gates and spam vectors.) povm : POVM, optional A sub-POVM which supplies the set of "reference" effect vectors that `errormap` acts on to produce the final effect vectors of this LindbladPOVM. This POVM must be *static* (have zero parameters) and its evolution type must match that of `errormap`. 
If None, then a :class:`ComputationalBasisPOVM` is used on the number of qubits appropriate to `errormap`'s dimension. mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object The basis for this spam vector. Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt) (or a custom basis object). If None, then this is extracted (if possible) from `errormap`. """ def __init__(self, errormap, povm=None, mx_basis=None): """ Creates a new LindbladPOVM object. Parameters ---------- errormap : MapOperator The error generator action and parameterization, encapsulated in a gate object. Usually a :class:`LindbladOp` or :class:`ComposedOp` object. (This argument is *not* copied, to allow ComposedPOVMEffects to share error generator parameters with other gates and spam vectors.) povm : POVM, optional A sub-POVM which supplies the set of "reference" effect vectors that `errormap` acts on to produce the final effect vectors of this LindbladPOVM. This POVM must be *static* (have zero parameters) and its evolution type must match that of `errormap`. If None, then a :class:`ComputationalBasisPOVM` is used on the number of qubits appropriate to `errormap`'s dimension. mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object The basis for this spam vector. Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt) (or a custom basis object). If None, then this is extracted (if possible) from `errormap`. 
""" self.error_map = errormap state_space = self.error_map.state_space if mx_basis is None: if isinstance(errormap, _op.ExpErrorgenOp) and isinstance(errormap.errorgen, _op.LindbladErrorgen): mx_basis = errormap.errorgen.matrix_basis else: raise ValueError("Cannot extract a matrix-basis from `errormap` (type %s)" % str(type(errormap))) self.matrix_basis = _Basis.cast(mx_basis, state_space) evotype = self.error_map._evotype if povm is None: assert(state_space.num_qubits >= 0), \ ("A default computational-basis POVM can only be used with an" " integral number of qubits!") povm = _ComputationalBasisPOVM(state_space.num_qubits, evotype) else: assert(povm.evotype == evotype), \ ("Evolution type of `povm` (%s) must match that of " "`errormap` (%s)!") % (povm.evotype, evotype) assert(povm.num_params == 0), \ "Given `povm` must be static (have 0 parameters)!" self.base_povm = povm items = [] # init as empty (lazy creation of members) _POVM.__init__(self, state_space, evotype, items) def __contains__(self, key): """ For lazy creation of effect vectors """ return bool(key in self.base_povm) def __iter__(self): return self.keys() def __len__(self): return len(self.base_povm) def keys(self): """ An iterator over the effect (outcome) labels of this POVM. """ for k in self.base_povm.keys(): yield k def values(self): """ An iterator over the effect SPAM vectors of this POVM. """ for k in self.keys(): yield self[k] def items(self): """ An iterator over the (effect_label, effect_vector) items in this POVM. 
""" for k in self.keys(): yield k, self[k] def __getitem__(self, key): """ For lazy creation of effect vectors """ if _collections.OrderedDict.__contains__(self, key): return _collections.OrderedDict.__getitem__(self, key) elif key in self: # calls __contains__ to efficiently check for membership #create effect vector now that it's been requested (lazy creation) pureVec = self.base_povm[key] effect = _ComposedPOVMEffect(pureVec, self.error_map) effect.set_gpindices(self.error_map.gpindices, self.parent) # initialize gpindices of "child" effect (should be in simplify_effects?) _collections.OrderedDict.__setitem__(self, key, effect) return effect else: raise KeyError("%s is not an outcome label of this LindbladPOVM" % key) def __reduce__(self): """ Needed for OrderedDict-derived classes (to set dict items) """ return (ComposedPOVM, (self.error_map.copy(), self.base_povm.copy(), self.matrix_basis), {'_gpindices': self._gpindices}) # preserve gpindices (but not parent) def allocate_gpindices(self, starting_index, parent, memo=None): """ Sets gpindices array for this object or any objects it contains (i.e. depends upon). Indices may be obtained from contained objects which have already been initialized (e.g. if a contained object is shared with other top-level objects), or given new indices starting with `starting_index`. Parameters ---------- starting_index : int The starting index for un-allocated parameters. parent : Model or ModelMember The parent whose parameter array gpindices references. memo : set, optional Used to prevent duplicate calls and self-referencing loops. If `memo` contains an object's id (`id(self)`) then this routine will exit immediately. Returns ------- num_new : int The number of *new* allocated parameters (so the parent should mark as allocated parameter indices `starting_index` to `starting_index + new_new`). 
""" if memo is None: memo = set() if id(self) in memo: return 0 memo.add(id(self)) assert(self.base_povm.num_params == 0) # so no need to do anything w/base_povm num_new_params = self.error_map.allocate_gpindices(starting_index, parent, memo) # *same* parent as self _mm.ModelMember.set_gpindices( self, self.error_map.gpindices, parent) return num_new_params def submembers(self): """ Get the ModelMember-derived objects contained in this one. Returns ------- list """ return [self.error_map] def relink_parent(self, parent): # Unnecessary? """ Sets the parent of this object *without* altering its gpindices. In addition to setting the parent of this object, this method sets the parent of any objects this object contains (i.e. depends upon) - much like allocate_gpindices. To ensure a valid parent is not overwritten, the existing parent *must be None* prior to this call. Parameters ---------- parent : Model or ModelMember The parent of this POVM. Returns ------- None """ self.error_map.relink_parent(parent) _mm.ModelMember.relink_parent(self, parent) def set_gpindices(self, gpindices, parent, memo=None): """ Set the parent and indices into the parent's parameter vector that are used by this ModelMember object. Parameters ---------- gpindices : slice or integer ndarray The indices of this objects parameters in its parent's array. parent : Model or ModelMember The parent whose parameter array gpindices references. memo : dict, optional A memo dict used to avoid circular references. Returns ------- None """ if memo is None: memo = set() elif id(self) in memo: return memo.add(id(self)) assert(self.base_povm.num_params == 0) # so no need to do anything w/base_povm self.error_map.set_gpindices(gpindices, parent, memo) self.terms = {} # clear terms cache since param indices have changed now _mm.ModelMember._set_only_my_gpindices(self, gpindices, parent) def simplify_effects(self, prefix=""): """ Creates a dictionary of simplified effect vectors. 
Returns a dictionary of effect POVMEffects that belong to the POVM's parent `Model` - that is, whose `gpindices` are set to all or a subset of this POVM's gpindices. Such effect vectors are used internally within computations involving the parent `Model`. Parameters ---------- prefix : str A string, usually identitying this POVM, which may be used to prefix the simplified gate keys. Returns ------- OrderedDict of POVMEffects """ # Create "simplified" effect vectors, which infer their parent and # gpindices from the set of "factor-POVMs" they're constructed with. if prefix: prefix += "_" simplified = _collections.OrderedDict( [(prefix + k, self[k]) for k in self.keys()]) return simplified @property def parameter_labels(self): """ An array of labels (usually strings) describing this model member's parameters. """ return self.error_map.parameter_labels @property def num_params(self): """ Get the number of independent parameters which specify this POVM. Returns ------- int the number of independent parameters. """ # Recall self.base_povm.num_params == 0 return self.error_map.num_params def to_vector(self): """ Extract a vector of the underlying gate parameters from this POVM. Returns -------
                        # (tail of a comment begun above this chunk:)
                        # ... entries to the next workq file
                        steps_to_scramble = " ".join(reverse_steps(steps_to_solve.split()))

                        if self.use_edges_pattern:
                            workq_line = f"{pattern}:{state}:{steps_to_scramble}"
                        else:
                            workq_line = f"{state}:{steps_to_scramble}"

                        # Pad every workq line to a fixed width so the file can be
                        # processed with fixed-length records.
                        to_write.append(workq_line + " " * (workq_line_length - len(workq_line)) + "\n")
                        to_write_count += 1
                        self.workq_size += 1

                        # Flush in batches to avoid per-line write overhead.
                        if to_write_count >= 10000:
                            fh_workq_next.write("".join(to_write))
                            to_write = []
                            to_write_count = 0

                # Flush whatever is left in the batch buffer.
                if to_write_count:
                    fh_workq_next.write("".join(to_write))
                    to_write = []
                    to_write_count = 0
        else:
            # No new entries: still create an empty "next" workq file so the
            # move/merge steps below always have a file to work with.
            with open(self.workq_filename_next, "w") as fh_workq_next:
                pass

        if pruned:
            log.warning(f"kept {kept:,}, pruned {pruned:,}")

        self.time_in_building_workq += (dt.datetime.now() - start_time).total_seconds()
        log.info("building next workq file end")

        # Now merge the lookup-table.txt we built in the previous levels with the .new file
        # Both are sorted so we can use the --merge option
        if os.path.exists(self.filename):
            log.info("sort --merge our current lookup-table.txt file with the .20-new-states file begin")
            start_time = dt.datetime.now()
            subprocess.check_output(
                "LC_ALL=C nice sort --parallel=%d --merge --temporary-directory=%s --output %s.30-final %s %s.20-new-states"
                % (self.cores, FAST_TMP, self.workq_filename, self.filename, self.workq_filename),
                shell=True,
            )
            self.time_in_sort += (dt.datetime.now() - start_time).total_seconds()
            log.info("sort --merge our current lookup-table.txt file with the .20-new-states file end")
        else:
            # First depth: there is no existing table to merge with, just copy.
            subprocess.check_output(
                f"cp {self.workq_filename}.20-new-states {self.workq_filename}.30-final", shell=True
            )

        log.info("move files begin")
        start_time = dt.datetime.now()
        os.remove(f"{self.workq_filename}.10-results")
        os.remove(f"{self.workq_filename}.20-new-states")
        shutil.move(f"{self.workq_filename}.30-final", self.filename)

        # mv the next workq to be the current workq
        shutil.move(self.workq_filename_next, self.workq_filename)
        self.time_in_file_delete += (dt.datetime.now() - start_time).total_seconds()
        log.info("move files end")

        # We have finished this depth of the search, update our stats and print them
        self.stats[self.depth] = new_states_count
        log.warning(f"{self.index}: finished depth {self.depth}, workq size {self.workq_size:,}")

    def search(self, max_depth, cores):
        """
        This is where the magic happens

        Breadth-first expansion: repeatedly launch the per-core workq crunchers,
        process their results, and advance `self.depth` until the workq file is
        empty (no new states to expand).
        """
        self.index = 0
        self.stats = {0: 0}
        self.cores = cores
        self._search_setup()

        while True:
            self._search_launch_builder_crunch_workq_per_core()
            self._search_process_builder_crunch_workq_results(max_depth)
            self.depth += 1
            self.log_table_stats()

            # If the workq file is empty our search is complete
            if not os.path.getsize(self.workq_filename):
                os.remove(self.workq_filename)
                break

    def save_starting_states(self):
        # Read the finished lookup table and emit every state (plus all of its
        # whole-cube rotations) as a "starting state" tuple for code generation.
        patterns = []
        to_write = []

        with open(self.filename, "r") as fh_read:
            for line in fh_read:
                if self.use_edges_pattern:
                    (pattern, cube_state_string, steps) = line.rstrip().split(":")
                    self.cube.state = list(cube_state_string)
                else:
                    (cube_state_string, steps) = line.rstrip().split(":")
                    self.cube.state = list(cube_state_string)

                if self.use_edges_pattern:
                    patterns.append(pattern)

                # Table-specific filter: only keep states with specific centers.
                if self.name == "5x5x5-edges-solve-second-four":
                    if (
                        self.cube.state[53] != "F"
                        or self.cube.state[73] != "F"
                        or self.cube.state[103] != "B"
                        or self.cube.state[123] != "B"
                    ):
                        continue

                # Index 0 of cube.state is a placeholder, hence the [1:] slice.
                to_write.append(f" ('{cube_state_string[1:]}', 'ULFRBD'),")

                # Also emit every whole-cube rotation of this state.
                for step in self.rotations:
                    self.cube.state = list(cube_state_string)
                    self.cube.rotate(step)
                    # self.cube.print_cube()
                    to_write.append(f" ('{''.join(self.cube.state[1:])}', 'ULFRBD'),")

        with open(f"{self.filename}.starting-states", "w") as fh_final:
            to_write.sort()
            fh_final.write("\n".join(to_write) + "\n")
        log.info("wrote %d starting states" % len(to_write))

        # Second pass: produce a compact representation of each starting state.
        to_write = []
        with open(f"{self.filename}.starting-states", "r") as fh_read:
            for line in fh_read:
                (state, order) = line.strip().split("', '")

                # remove the leading ('
                state = state[2:]
                state = state.replace(".", "")

                if self.store_as_hex:
                    state = convert_state_to_hex(state)
                to_write.append(f"'{state}',")

        with open(f"{self.filename}.starting-states.compact", "w") as fh:
            to_write.sort()
            fh.write("\n".join(to_write) + "\n")

        if self.use_edges_pattern:
            print("state_target patterns:\n%s\n\n" % "\n".join(patterns))

        shutil.move(f"{self.filename}.starting-states", self.filename)

    def save(self):
        """
        Shrink each lookup-table entry to its compact on-disk format (strip '.'s,
        optionally hex-encode), then pad, sort, histogram, and (optionally) build
        cost-only / hash-cost-only companion files.
        """
        start_time = dt.datetime.now()
        to_write = []
        to_write_count = 0

        # Convert the states in our lookup-table to their smaller format...basically
        # remove all of the '.'s and if convert to hex (if requested).
        log.info(f"{self}: save() begin")
        log.info(f"{self}: convert state to smaller format, file {self.filename}")

        with open(f"{self.filename}.small", "w") as fh_final:
            with open(self.filename, "r") as fh_read:

                if self.use_edges_pattern:
                    for line in fh_read:
                        (pattern, cube_state_string, steps) = line.rstrip().split(":")
                        pattern = pattern.replace(".", "")
                        self.cube.state = list(cube_state_string)

                        if self.lt_centers_max_depth:
                            centers = "".join([self.cube.state[x] for x in centers_555])

                            # Only keep the entries where centers are solved
                            if centers != "UUUUUUUUULLLLLLLLLFFFFFFFFFRRRRRRRRRBBBBBBBBBDDDDDDDDD":
                                continue

                            pattern = pattern.replace("UUUUUUUUULLLLLLLLLFFFFFFFFFRRRRRRRRRBBBBBBBBBDDDDDDDDD", "")

                        to_write.append(f"{pattern}:{steps}")
                        to_write_count += 1

                        if to_write_count >= WRITE_BATCH_SIZE:
                            fh_final.write("\n".join(to_write) + "\n")
                            to_write = []
                            to_write_count = 0

                elif self.use_centers_then_edges:
                    for line in fh_read:
                        (cube_state_string, steps) = line.rstrip().split(":")
                        self.cube.state = list(cube_state_string)

                        if self.size == "4x4x4":
                            centers = "".join([self.cube.state[x] for x in centers_444])
                            edges = "".join([self.cube.state[x] for x in edges_444])
                            centers = centers.replace(".", "")
                            edges = edges.replace(".", "")

                            if self.store_as_hex:
                                centers = convert_state_to_hex(centers)
                                edges = convert_state_to_hex(edges)

                        elif self.size == "5x5x5":
                            centers = "".join([self.cube.state[x] for x in centers_555])
                            edges = "".join([self.cube.state[x] for x in edges_555])
                            centers = centers.replace(".", "")
                            edges = edges.replace(".", "")

                            if self.store_as_hex:
                                centers = convert_state_to_hex(centers)
                                edges = convert_state_to_hex(edges)
                        else:
                            raise Exception(f"Add support for {self.size}")

                        to_write.append(f"{centers}{edges}:{steps}")
                        to_write_count += 1

                        if to_write_count >= WRITE_BATCH_SIZE:
                            fh_final.write("\n".join(to_write) + "\n")
                            to_write = []
                            to_write_count = 0

                else:
                    # Hoist attribute lookups out of the (very hot) per-line loop.
                    lt_centers_max_depth = self.lt_centers_max_depth
                    store_as_hex = self.store_as_hex

                    for line in fh_read:
                        (cube_state_string, steps) = line.rstrip().split(":")

                        if lt_centers_max_depth:
                            self.cube.state = list(cube_state_string)
                            edges = "".join([self.cube.state[x] for x in edges_555])
                            edges = edges.replace("-", "x")
                            centers = "".join([self.cube.state[x] for x in centers_555])

                            # Only keep entries whose centers are solved.
                            if centers != "UUUUUUUUULLLLLLLLLFFFFFFFFFRRRRRRRRRBBBBBBBBBDDDDDDDDD":
                                continue

                            cube_state_string_small = edges
                        else:
                            cube_state_string_small = cube_state_string[1:].replace(".", "")

                            if store_as_hex:
                                cube_state_string_small = convert_state_to_hex(cube_state_string_small)

                        to_write.append(f"{cube_state_string_small}:{steps}")
                        to_write_count += 1

                        if to_write_count >= WRITE_BATCH_SIZE:
                            fh_final.write("\n".join(to_write))
                            fh_final.write("\n")
                            to_write = []
                            to_write_count = 0

                # Flush the remaining batch.
                if to_write_count:
                    fh_final.write("\n".join(to_write))
                    fh_final.write("\n")
                    to_write = []
                    to_write_count = 0

        shutil.move(f"{self.filename}.small", self.filename)

        files_to_pad = (self.filename,)

        for filename in files_to_pad:
            log.info(f"{self}: pad the file")
            # NOTE(review): "(unknown)" below looks like a redacted placeholder for
            # the filename argument (likely "{filename}") — verify against upstream.
            subprocess.check_output(f"nice ./utils/pad-lines.py (unknown)", shell=True)

            # Check to see if the file is already sorted before we spend the cycles to sort it
            try:
                log.info(f"{self}: sort --check")
                subprocess.check_output(f"LC_ALL=C nice sort --check (unknown)", shell=True)
            except subprocess.CalledProcessError:
                log.info(f"{self}: sort the file")
                subprocess.check_output(
                    "LC_ALL=C nice sort --parallel=%d --temporary-directory=%s --output=%s %s"
                    % (self.cores, FAST_TMP, filename, filename),
                    shell=True,
                )

            log.info(f"{self}: build histogram")
            subprocess.check_output(f"nice ./utils/print-histogram.py (unknown) >> histogram.txt", shell=True)

        # NOTE(review): `filename` here is the loop variable leaked from the loop
        # above (a single-element tuple, so it equals self.filename).
        if self.use_cost_only:
            log.info(f"{self}: build cost-only copy of file")
            convert_to_cost_only(filename)

        elif self.use_hash_cost_only:
            log.info(f"{self}: build hash-cost-only copy of file")
            convert_to_hash_cost_only(filename, self.bucketcount)

        log.info(f"{self}: save() end")
        self.time_in_save = (dt.datetime.now() - start_time).total_seconds()

    def get_starting_states(self, use_hex, use_edges_pattern):
        # Build the sorted, comma-joined list of starting-state strings used by
        # the code generators below.
        if self.starting_cube_states:
            foo = []

            for (state, state_type) in self.starting_cube_states:
                if state_type == "ULFRBD":
                    if use_edges_pattern:
                        self.cube.state = ["x"] + list(state)

                        if self.size == "5x5x5":
                            state = edges_recolor_pattern_555(self.cube.state[:])
                            state = "".join([state[index] for index in wings_for_edges_pattern_555])
                        elif self.size == "4x4x4":
                            state = edges_recolor_pattern_444(self.cube.state[:])
                            state = "".join([state[index] for (_, index, _) in wings_for_edges_recolor_pattern_444])
                        else:
                            raise Exception(f"use_edges_pattern not supported for {self.size}")
                    else:
                        state = "".join(state.split()).strip().replace(".", "")

                        if use_hex:
                            state = convert_state_to_hex(state)

                    foo.append(' "' + state + '"')

                elif state_type == "ascii":
                    # do this later
                    pass

                else:
                    raise Exception(f"{state_type} is an invalid state_type")

            foo.sort()
            starting_states = ",\n".join(foo)
        else:
            # NOTE(review): this calls a module-level get_starting_states() that
            # shares a name with this method — confirm the intended target.
            class_name = type(self).__name__.replace("Build", "LookupTable")
            starting_states = get_starting_states(self.filename, class_name, None)

        return starting_states

    def _code_gen_lookup_table(self):
        # Print (to stdout) the Python source for a LookupTable subclass that
        # wraps the table file we just built.
        class_name = type(self).__name__.replace("Build", "LookupTable")
        (histogram, linecount, max_depth) = parse_histogram(self.filename)
        starting_states = self.get_starting_states(self.store_as_hex, self.use_edges_pattern)
        filename_minus_directory = self.filename.split("/")[1]

        print(
            '''
class %s(LookupTable):
    """
    %s
    """

    # fmt: off
    state_targets = (
        %s
    )
    # fmt: on

    def __init__(self, parent, build_state_index: bool = False):
        LookupTable.__init__(
            self,
            parent,
            \"%s\",
            self.state_targets,
            linecount=%d,
            max_depth=%d,
            all_moves=moves_%s,
            # fmt: off
            illegal_moves=(
                "%s"
            ),
            # fmt: on
            use_state_index=True,
            build_state_index=build_state_index,
        )

    def state(self):
        return "".join([self.parent.state[x] for x in CUBE_POSITION_LIST])
        return "".join(["U" if self.parent.state[x] in ("U", "D") else "x" for x in CUBE_POSITION_LIST])

    def populate_cube_from_state(self, state, cube, steps_to_solve):
        state = list(state)

        for (pos, pos_state) in zip(CUBE_POSITION_LIST, state):
            cube[pos] = pos_state
'''
            % (
                class_name,
                histogram,
                starting_states,
                filename_minus_directory,
                linecount,
                max_depth,
                self.size.replace("x", ""),
                '",\n "'.join(self.illegal_moves),
            )
        )

    def _code_gen_lookup_table_ida(self):
        # Print (to stdout) the Python source for a LookupTableIDA subclass
        # (IDA* variant) that wraps the table file we just built.
        class_name = type(self).__name__.replace("Build", "LookupTableIDA")
        (histogram, linecount, max_depth) = parse_histogram(self.filename)
        starting_states = self.get_starting_states(self.store_as_hex, self.use_edges_pattern)

        print(
            '''
class %s(LookupTableIDA):
    """
    %s
    """

    state_targets = (
        %s
    )

    def __init__(self, parent):
        LookupTableIDA.__init__(
            self,
            parent,
            '%s',
            self.state_targets,
            moves_%s,
            # illegal moves
            (TBD),
            linecount=%d,
            max_depth=%d,
            filesize=%d)

    def ida_heuristic(self):
        parent_state = self.parent.state'''
            % (
                class_name,
                histogram,
                starting_states,
                self.filename,
                self.size.replace("x", ""),
                linecount,
                max_depth,
                os.path.getsize(self.filename),
            )
        )

        # Emit the body of ida_heuristic() depending on the state encoding.
        if self.store_as_hex:
            print(
                " lt_state = ''.join(['1' if parent_state[x] in (foo, bar) else '0' for x in TBD_%s])"
                % self.size.replace("x", "")
            )
            print(" lt_state = self.hex_format % int(lt_state, 2)\n\n")
        elif self.use_edges_pattern:
            print(f" state = edges_recolor_pattern_{self.size.replace('x', '')}(parent_state[:])")
            print(
                " edges_state = ''.join([state[index] for index in wings_for_edges_pattern_%s])"
                % self.size.replace("x", "")
            )
            print(" lt_state = edges_state")
        else:
            print(f" lt_state = ''.join([parent_state[x] for x in TBD_{self.size.replace('x', '')}])")

        print(" cost_to_goal = max(foo_cost, bar_cost)")
        print(" return (lt_state, cost_to_goal)\n\n")

    def code_gen(self):
        # Choose which generator to use: IDA* tables are those whose filename
        # ends in "0.txt" and have a companion prune table.
        if "0.txt" in self.filename:
            first_prune_table_filename = self.filename.replace("0.txt", "1.txt").replace(
                "lookup-table", "starting-states-lookup-table"
            )

            # if os.path.exists(first_prune_table_filename):
            # NOTE(review): `True or ...` makes this branch unconditional — the
            # exists() check is effectively disabled; confirm that is intended.
            if True or os.path.exists(first_prune_table_filename):
                log.info(f"prune table {first_prune_table_filename} does exist")
                self._code_gen_lookup_table_ida()
            else:
                log.info(f"prune table {first_prune_table_filename} does NOT exist")
                self._code_gen_lookup_table()
        else:
            self._code_gen_lookup_table()

    def search_new(self, max_depth=99, cores=4):
        # Experimental in-memory BFS variant of search().
        workq = deque()
        table = {}

        # seed the workq
        for cube in self.starting_cubes:
            cube_state_minus_x = "".join(cube.state[1:])
            table[cube_state_minus_x] = []

            for step in self.legal_moves:
                workq.append((cube_state_minus_x, [step]))

        index = 0
        # NOTE(review): this overrides the max_depth parameter unconditionally —
        # looks like leftover debug code; verify.
        max_depth = 5
        log.info(f"max_depth {max_depth}")

        while workq:
            (state, steps_to_scramble) = workq.popleft()
            # log.info(f"{index}: {state}, {steps_to_scramble}")
            debug = False

            """
            if len(steps_to_scramble) >= 3 and steps_to_scramble[0] == "Lw" and steps_to_scramble[1] == "U'" and steps_to_scramble[2] == "3Bw2":
                log.info(f"{index}: {steps_to_scramble}")
                debug = True

            if debug:
                log.info(state)
            """
            # NOTE(review): `cube` below is the loop variable leaked from the
            # seeding loop above.  (Chunk is cut here; the rest of search_new()
            # lies below this view.)
            cube.state
<reponame>TheFarGG/qord
# NOTE(review): the line above is a stray dataset artifact, not valid Python.

# MIT License

# Copyright (c) 2022 <NAME>

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import annotations

from qord.models.base import BaseModel
from qord.models.users import User
from qord._helpers import parse_iso_timestamp, create_cdn_url, UNDEFINED, BASIC_EXTS
from datetime import datetime

import typing

if typing.TYPE_CHECKING:
    from qord.models.roles import Role
    from qord.models.guilds import Guild
    from qord.flags.users import UserFlags


def _user_features(cls):
    # Class decorator: copy read-only pass-through properties for the wrapped
    # User onto GuildMember, except the names that GuildMember overrides.
    ignore = (
        "avatar",
        "name",
        "is_avatar_animated",
        "avatar_url",
    )

    def _create_property(name: str) -> property:
        # Build a property that forwards attribute access to self.user.
        def getter(self: GuildMember):
            return getattr(self.user, name)

        getter.__name__ = name
        getter.__doc__ = f"Shorthand property for :attr:`User.{name}`."
        return property(getter)

    # Mirror both slotted attributes and class-level members of User,
    # skipping private names and anything GuildMember already defines.
    for attr in User.__slots__:
        if (
            attr in ignore
            or attr.startswith("_")
            or attr in cls.__dict__
        ):
            continue
        setattr(cls, attr, _create_property(attr))

    for attr in User.__dict__:
        if (
            attr in ignore
            or attr.startswith("_")
            or attr in cls.__dict__
        ):
            continue
        setattr(cls, attr, _create_property(attr))

    return cls


@_user_features
class GuildMember(BaseModel):
    r"""Representation of a guild member.

    A guild member is simply a user that is part of a specific :class:`Guild`.
    Every guild member has an underlying :class:`User` object attached to it.

    .. note::
        This class provides shorthand properties to access the underlying user's
        data however certain properties like :attr:`.name` and :attr:`.avatar` have
        different behaviour in this class.

        For example, :attr:`.avatar` and other avatar related methods and attributes
        also consider the guild specific avatar of member for relevant functionality
        with addition to user's global avatar.

    Attributes
    ----------
    guild: :class:`Guild`
        The parent guild that this member belongs to.
    user: :class:`User`
        The user associated to this member.
    nickname: Optional[:class:`builtins.str`]
        The nickname of this member in the guild. If member has no guild
        specific nickname set, This is ``None``. See :attr:`.display_name` property
        that aids in retrieving the name more efficiently.
    guild_avatar: Optional[:class:`builtins.str`]
        The hash of avatar for this member in the guild. If member has no guild
        specific avatar set, This is ``None``. See :attr:`.display_avatar` property
        that aids in retrieving the avatar more efficiently.
    deaf: :class:`builtins.bool`
        Whether the member is deafened in voice channels.
    mute: :class:`builtins.bool`
        Whether the member is muted in voice channels.
    pending: :class:`builtins.bool`
        Whether the member has passed the membership screening.
    joined_at: :class:`datetime.datetime`
        The time when member joined the guild.
    premium_since: Optional[:class:`datetime.datetime`]
        The time when member started boosting the guild if applicable. If member
        is not boosting the guild, This is ``None``.
    timeout_until: Optional[:class:`datetime.datetime`]
        The time until which member is timed out and cannot interact with
        the guild. If member is not timed out, This is ``None``.

        .. note::
            This attribute may have a value set even if member is not actually
            timed out. In which case, The datetime object would be in past. See
            :meth:`.is_timed_out` check that covers all possible cases.

    role_ids: List[:class:`builtins.int`]
        The list of IDs of roles that are associated to this member.
    roles: List[:class:`Role`]
        The list of roles associated to this member.
    """

    if typing.TYPE_CHECKING:
        # -- Member properties --
        guild: Guild
        user: User
        nickname: typing.Optional[str]
        guild_avatar: typing.Optional[str]
        deaf: bool
        mute: bool
        pending: bool
        joined_at: datetime
        premium_since: typing.Optional[datetime]
        timeout_until: typing.Optional[datetime]
        role_ids: typing.List[int]
        roles: typing.List[Role]

        # -- User properties (applied by _user_features decorator) --
        id: int
        discriminator: str
        bot: bool
        system: bool
        accent_color: int
        locale: str
        premium_type: int
        flags: UserFlags
        public_flags: UserFlags
        # NOTE(review): premium_type is annotated twice (above and below) —
        # the duplicate is harmless but could be removed.
        premium_type: int
        banner: typing.Optional[str]
        mention: str
        proper_name: str
        default_avatar: str

    default_avatar_url = User.default_avatar_url
    banner_url = User.banner_url
    is_banner_animated = User.is_banner_animated
    create_dm = User.create_dm
    send = User.send

    __slots__ = ("guild", "_client", "user", "nickname", "guild_avatar", "deaf", "mute",
                 "pending", "joined_at", "premium_since", "timeout_until", "role_ids", "roles")

    def __init__(self, data: typing.Dict[str, typing.Any], guild: Guild) -> None:
        self.guild = guild
        self._client = guild._client
        self._update_with_data(data)

    def _update_with_data(self, data: typing.Dict[str, typing.Any]) -> None:
        # Populate/refresh all member attributes from a raw gateway/REST payload.
        self.user = User(data["user"], client=self._client)
        self.nickname = data.get("nick")
        self.guild_avatar = data.get("avatar")
        self.deaf = data.get("deaf", False)
        self.mute = data.get("mute", False)
        self.pending = data.get("pending", False)

        premium_since = data.get("premium_since")
        timeout_until = data.get("communication_disabled_until")

        self.joined_at = parse_iso_timestamp(data["joined_at"])
        self.premium_since = parse_iso_timestamp(premium_since) if premium_since is not None else None
        self.timeout_until = parse_iso_timestamp(timeout_until) if timeout_until is not None else None

        # Resolve role IDs against the guild cache; uncached roles are
        # kept in role_ids but omitted from roles.
        role_ids = [int(role_id) for role_id in data.get("roles", [])]
        roles = []
        guild = self.guild

        for role_id in role_ids:
            role = guild.cache.get_role(role_id)
            if role is not None:
                roles.append(role)

        self.role_ids = role_ids
        self.roles = roles

    @property
    def name(self) -> str:
        r"""Returns the name of this member as displayed in the guild.

        This property would return the :attr:`.nickname` of the member if it's
        present and would fallback to underlying user's :attr:`~User.name` if
        nickname is not available.

        Returns
        -------
        :class:`builtins.str`
        """
        nick = self.nickname
        if nick is not None:
            return nick
        return self.user.name

    @property
    def avatar(self) -> typing.Optional[str]:
        r"""Returns the avatar's hash of this member as displayed in the guild.

        This property would return the :attr:`.guild_avatar` of this member if
        available and would fallback to underlying user's :attr:`~User.avatar`
        when unavailable. If user has no avatar set, ``None`` would be returned.

        Returns
        -------
        Optional[:class:`builtins.str`]
        """
        guild_avatar = self.guild_avatar
        if guild_avatar is not None:
            return guild_avatar
        return self.user.avatar

    def avatar_url(self, extension: str = None, size: int = None) -> typing.Optional[str]:
        r"""Returns the avatar URL for this member.

        This method returns URL for the member's displayed :attr:`.avatar` i.e
        use the guild specific member avatar if present otherwise user's global
        avatar. If none of these avatars are set, The result of :meth:`.default_avatar_url`
        is returned instead.

        The ``extension`` parameter only supports following extensions
        in the case of avatars:

        - :attr:`ImageExtension.GIF`
        - :attr:`ImageExtension.PNG`
        - :attr:`ImageExtension.JPG`
        - :attr:`ImageExtension.JPEG`
        - :attr:`ImageExtension.WEBP`

        Parameters
        ----------
        extension: :class:`builtins.str`
            The extension to use in the URL. If not supplied, An ideal
            extension will be picked depending on whether member has static
            or animated avatar.
        size: :class:`builtins.int`
            The size to append to URL. Can be any power of 2 between
            64 and 4096.

        Raises
        ------
        ValueError
            Invalid extension or size was passed.
        """
        avatar = self.guild_avatar

        if avatar is None:
            # No guild-specific avatar: delegate to the user's global avatar URL.
            return self.user.avatar_url(extension=extension, size=size)

        if extension is None:
            # Animated avatars default to GIF, static ones to PNG.
            extension = "gif" if self.is_avatar_animated() else "png"

        return create_cdn_url(
            f"/guilds/{self.guild.id}/users/{self.id}/{self.avatar}",
            extension=extension,
            size=size,
            valid_exts=BASIC_EXTS,
        )

    def is_avatar_animated(self) -> bool:
        r"""Checks whether the member's avatar is animated.

        This method checks for the :attr:`.avatar` to be animated i.e either
        one of member's guild specific or underlying user's avatar should be
        animated. To check specifically for the underlying user's avatar, Consider
        using :meth:`User.is_avatar_animated` instead.

        Returns
        -------
        :class:`builtins.bool`
        """
        avatar = self.avatar
        if avatar is None:
            return False
        # Animated asset hashes are prefixed with "a_".
        return avatar.startswith("a_")

    def is_boosting(self) -> bool:
        r"""Checks whether the member is boosting the guild.

        Returns
        -------
        :class:`builtins.bool`
        """
        return self.premium_since is not None

    def is_timed_out(self) -> bool:
        r"""Checks whether the member is timed out.

        Returns
        -------
        :class:`builtins.bool`
        """
        timeout_until = self.timeout_until
        if timeout_until is None:
            return False
        # NOTE(review): datetime.now() is timezone-naive; if parse_iso_timestamp
        # returns aware datetimes this comparison raises TypeError — verify.
        now = datetime.now()
        return now < timeout_until

    async def kick(self, *, reason: str = None) -> None:
        r"""Kicks the member from the associated guild.

        Bot requires the :attr:`~Permissions.kick_members` permission in the
        relevant guild to perform this action.

        Parameters
        ----------
        reason: :class:`builtins.str`
            The reason for this action that shows up on audit log.

        Raises
        ------
        HTTPForbidden
            Missing permissions.
        HTTPException
            Failed to perform this action.
        """
        guild = self.guild
        await guild._rest.kick_guild_member(guild_id=guild.id, user_id=self.user.id, reason=reason)

    async def edit(
        self,
        *,
        nickname: typing.Optional[str] = UNDEFINED,
        roles: typing.List[Role] = UNDEFINED,
        mute: bool = UNDEFINED,
        deaf: bool = UNDEFINED,
        timeout_until: datetime = UNDEFINED,
        reason: str
import numpy as np
from sklearn.metrics import roc_auc_score
import matplotlib
matplotlib.use('Agg')  # non-interactive backend; must precede pyplot import
import matplotlib.pyplot as plt
from os.path import join
import dask.dataframe as dd
import pandas as pd
import seaborn as sns
from collections import Counter
import xarray as xr
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from scipy.ndimage.filters import gaussian_filter
from .process import generate_mode_grid

sns.set_style("darkgrid")


def corr_coef_metric(y_true, y_pred):
    # Pearson correlation coefficient between truth and prediction.
    return np.corrcoef(y_true, y_pred)[0, 1]


def score_neurons(y_true, neuron_activations, metric="auc"):
    # Score each neuron (column of neuron_activations) against y_true using
    # the chosen metric: "auc" (ROC AUC) or "r" (Pearson correlation).
    scores = np.zeros(neuron_activations.shape[1])
    if metric == "auc":
        for i in range(neuron_activations.shape[1]):
            scores[i] = roc_auc_score(y_true, neuron_activations[:, i])
    elif metric == "r":
        for i in range(neuron_activations.shape[1]):
            scores[i] = corr_coef_metric(y_true, neuron_activations[:, i])
    else:
        # Unknown metric names silently fall back to ROC AUC.
        for i in range(neuron_activations.shape[1]):
            scores[i] = roc_auc_score(y_true, neuron_activations[:, i])
    return scores


def plot_neuron_composites(out_path, model_desc, x_data, neuron_activations, neuron_scores,
                           variable_name, composite_size=30, figsize_scale=3.0,
                           out_format="png", dpi=200, plot_kwargs=None,
                           colorbar_loc=(0.93, 0.1, 0.02, 0.8)):
    # For each neuron (best first), average the input fields of its top
    # `composite_size` most-activating examples and plot the composites in a
    # single panel figure saved under out_path.
    neuron_ranking = np.argsort(neuron_scores)[::-1]
    # x_data is assumed to be an xarray-like array with a `var_name` coordinate
    # on its last axis — TODO confirm.
    variable_index = np.where(x_data.var_name == variable_name)[0][0]
    if plot_kwargs is None:
        plot_kwargs = {}
    fig_rows = int(np.floor(np.sqrt(neuron_scores.size)))
    fig_cols = int(np.ceil(neuron_scores.size / fig_rows))
    fig, axes = plt.subplots(fig_rows, fig_cols,
                             figsize=(fig_cols * figsize_scale, fig_rows * figsize_scale))
    # Shared color scale across all panels.
    plot_kwargs["vmin"] = x_data[..., variable_index].min()
    plot_kwargs["vmax"] = x_data[..., variable_index].max()
    pc = None
    for a, ax in enumerate(axes.ravel()):
        if a >= neuron_scores.size:
            # More subplot slots than neurons: hide the extras.
            ax.set_visible(False)
            continue
        example_rankings = np.argsort(neuron_activations[:, neuron_ranking[a]])[::-1][:composite_size]
        x_composite = x_data[example_rankings, :, :, variable_index].mean(axis=0)
        pc = ax.pcolormesh(x_composite, **plot_kwargs)
        ax.axes.xaxis.set_ticklabels([])
        ax.axes.yaxis.set_ticklabels([])
        ax.set_title("Neuron {0:d} Score {1:0.3f}".format(neuron_ranking[a],
                                                          neuron_scores[neuron_ranking[a]]))
    if pc is not None:
        cb_ax = fig.add_axes(colorbar_loc)
        cbar = fig.colorbar(pc, cax=cb_ax)
    fig.suptitle(model_desc.replace("_", " ") + " " + variable_name + " Neuron Example Composites")
    plt.savefig(join(out_path, f"neuron_composite_{variable_name}_{model_desc}.{out_format}"),
                dpi=dpi, bbox_inches="tight")
    plt.close()
    return


def plot_saliency_composites(out_path, model_name, saliency_data, neuron_activations,
                             neuron_scores, variable_name, composite_size=30,
                             figsize_scale=3.0, out_format="png", dpi=200,
                             plot_kwargs=None, colorbar_loc=(0.93, 0.1, 0.02, 0.8)):
    # Same panel layout as plot_neuron_composites, but compositing the saliency
    # fields for each neuron's top-activating examples.
    neuron_ranking = np.argsort(neuron_scores)[::-1]
    variable_index = np.where(saliency_data.var_name == variable_name)[0][0]
    if plot_kwargs is None:
        plot_kwargs = {}
    fig_rows = int(np.floor(np.sqrt(neuron_scores.size)))
    fig_cols = int(np.ceil(neuron_scores.size / fig_rows))
    fig, axes = plt.subplots(fig_rows, fig_cols,
                             figsize=(fig_cols * figsize_scale, fig_rows * figsize_scale))
    plot_kwargs["vmin"] = saliency_data.sel(var_name=variable_name).min().values[()]
    plot_kwargs["vmax"] = saliency_data.sel(var_name=variable_name).max().values[()]
    pc = None
    for a, ax in enumerate(axes.ravel()):
        if a >= neuron_scores.size:
            ax.set_visible(False)
            continue
        example_rankings = np.argsort(neuron_activations[:, neuron_ranking[a]])[::-1][:composite_size]
        # saliency_data is indexed (neuron, example, y, x, variable) — TODO confirm.
        saliency_composite = saliency_data[neuron_ranking[a], example_rankings, :, :,
                                           variable_index].mean(axis=0)
        pc = ax.pcolormesh(saliency_composite, **plot_kwargs)
        ax.axes.xaxis.set_ticklabels([])
        ax.axes.yaxis.set_ticklabels([])
        ax.set_title("Neuron {0:d} Score {1:0.3f}".format(neuron_ranking[a],
                                                          neuron_scores[neuron_ranking[a]]))
    if pc is not None:
        cb_ax = fig.add_axes(colorbar_loc)
        cbar = fig.colorbar(pc, cax=cb_ax)
    fig.suptitle(model_name.replace("_", " ") + " " + variable_name + " Saliency Composites")
    plt.savefig(join(out_path, f"saliency_composite_{variable_name}_{model_name}.{out_format}"),
                dpi=dpi, bbox_inches="tight")
    plt.close()
    return


def plot_top_activations(out_path, model_name, x_data, meta_df, neuron_activations,
                         neuron_scores, saliency_data, variable_name, panel_size=16,
                         figsize_scale=3.0, out_format="png", dpi=200, plot_kwargs=None,
                         colorbar_loc=(0.93, 0.1, 0.02, 0.8)):
    # For each neuron, plot a panel of its `panel_size` most-activating storms
    # (input field + saliency contours), one figure per neuron.
    if plot_kwargs is None:
        plot_kwargs = {}
    fig_rows = int(np.floor(np.sqrt(panel_size)))
    fig_cols = int(np.ceil(panel_size / fig_rows))
    for neuron_number in range(neuron_scores.size):
        n_rank = neuron_activations[f"neuron_{neuron_number:03d}"].argsort()[::-1].values
        fig, axes = plt.subplots(fig_rows, fig_cols,
                                 figsize=(fig_cols * figsize_scale, fig_rows * figsize_scale),
                                 sharex=True, sharey=True)
        plt.subplots_adjust(wspace=0, hspace=0)
        sal_ex = saliency_data[neuron_number, n_rank[:panel_size]].sel(var_name=variable_name)
        sal_max = np.abs(sal_ex).max()
        pc = None
        for a, ax in enumerate(axes.ravel()):
            # Channel 0 of x_data is plotted as the background field.
            pc = ax.pcolormesh(x_data[n_rank[a], :, :, 0], **plot_kwargs)
            # Negated saliency so contour colors align with the RdBu_r colormap.
            ax.contour(-sal_ex[a], 6, vmin=-sal_max, vmax=sal_max, cmap="RdBu_r")
            ax.set_xticks(np.arange(0, 32, 8))
            ax.set_yticks(np.arange(0, 32, 8))
            ex_n_score = neuron_activations.loc[n_rank[a], f"neuron_{neuron_number:03d}"]
            ax.text(0, 0,
                    pd.Timestamp(meta_df.loc[n_rank[a], "time"]).strftime("%Y-%m-%d %HZ")
                    + " S:{0:0.2f}".format(ex_n_score),
                    bbox=dict(facecolor='white', alpha=0.5))
        if pc is not None:
            cb_ax = fig.add_axes(colorbar_loc)
            cbar = fig.colorbar(pc, cax=cb_ax)
        fig.suptitle(f"Neuron {neuron_number} Top Activated Storms, Score: "
                     f"{neuron_scores[neuron_number]:0.3f}",
                     fontsize=14, y=0.95)
        plt.savefig(join(out_path,
                         f"top_activations_neuron_{variable_name}_{neuron_number:03d}_{model_name}.{out_format}"),
                    dpi=dpi, bbox_inches="tight")
        plt.close()
    return


def cape_shear_modes(neuron_activations, output_path, data_path, mode, model_name,
                     gmm_name=None, cluster=False, num_storms=5000):
    """
    Match specified number of top storms of each neuron, fetch storm patch, and then plot
    bivariate density of each nueron in CAPE/Shear space.
    Args:
        neuron_activations: CSV file of neuron activations
        output_path: Path to save output
        data_path: Absolute path of netcdf patch data
        model_name: name of model used for training
        mode: data partition: 'train', 'val', or 'test'
        num_storms: number of top activated storms to use for density estimation for each neuron

    Returns:
    """
    if cluster:
        col_type = 'cluster'
    else:
        col_type = 'neuron'
    if gmm_name is not None:
        file_name = f'CAPE_Shear_{model_name}_{gmm_name}_{mode}.png'
    else:
        file_name = f'CAPE_Shear_{model_name}_{mode}.png'
    df = pd.DataFrame(columns=['CAPE', '6km Shear', 'Value', col_type.capitalize()])
    cols = list(neuron_activations.columns[neuron_activations.columns.str.contains(col_type)])
    # Derive the CSV directory from the netCDF patch path.
    # NOTE(review): rstrip('/')[:-2] drops the last two characters of the path
    # (presumably turning ".../nc" into ".../") before appending 'csv' — verify
    # this matches the actual directory layout.
    csv_path = data_path.rstrip('/')[:-2] + 'csv'
    dates = sorted(set(neuron_activations['run_date'].astype('datetime64[ns]')))
    file_strings = [join(csv_path, f'track_step_NCARSTORM_d01_{x.strftime("%Y%m%d")}-0000.csv')
                    for x in dates]
    ddf = dd.read_csv(file_strings).compute()
    for col in cols:
        sub = neuron_activations.sort_values(by=[col], ascending=False).iloc[:num_storms, :]
        activation = sub[col].values
        x = ddf.iloc[sub.index, :]
        cape = x['MLCAPE-potential_max'].values
        # NOTE(review): both terms below use 'USHR6-potential_mean'; a shear
        # magnitude would normally combine U and V components ('VSHR6-...') —
        # looks like a copy-paste bug, verify against the source data columns.
        shear = np.sqrt(x['USHR6-potential_mean']**2 + x['USHR6-potential_mean']**2).values
        # NOTE(review): DataFrame.append is deprecated in pandas >= 1.4
        # (removed in 2.0); pd.concat is the replacement.
        df = df.append(pd.DataFrame(zip(cape, shear, activation, [col] * num_storms),
                                    columns=df.columns))
    plt.figure(figsize=(20, 16))
    sns.set(font_scale=1.5)
    colors = sns.color_palette("deep", len(cols))
    sns.scatterplot(data=df, x='CAPE', y='6km Shear', hue=col_type.capitalize(), alpha=0.5,
                    size='Value', sizes=(1, 200), palette=colors, edgecolors='k', linewidth=1)
    sns.kdeplot(data=df, x='CAPE', y='6km Shear', hue=col_type.capitalize(), fill=False,
                alpha=1, thresh=0.4, levels=3, palette=colors, clip=(0, 6000),
                linewidths=8, legend=False)
    plt.title(f'{col_type.capitalize()} for Top {num_storms} Storms ({mode})')
    plt.savefig(join(output_path, file_name), dpi=300, bbox_inches='tight')
    return


def spatial_neuron_activations(neuron_activations, output_path, mode, model_name,
                               gmm_name=None, cluster=False, quant_thresh=0.99):
    """
    Plot spatial distribution of top activated storms for each neuron
    Args:
        neuron_activations: CSV file of neuron activations
        output_path: Output path from config
        model_name: Model name from config
        mode: Data partition (train, va, or test)
        quant_thresh: Quantile to select storms that exceed threshold

    Returns:
    """
    if cluster:
        col_type = 'cluster'
    else:
        col_type = 'neuron'
    if gmm_name is not None:
        file_name = f'Spatial_activations_{model_name}_{gmm_name}_{mode}.png'
    else:
        file_name = f'Spatial_activations_{model_name}_{mode}.png'
    fig = plt.figure(figsize=(20, 16))
    # CONUS-centered Lambert Conformal map with standard basemap features.
    lcc = ccrs.LambertConformal(central_longitude=-97.5, standard_parallels=(38.5, 38.5))
    ax = fig.add_subplot(1, 1, 1, projection=lcc)
    ax.set_extent([-120, -74, 25, 50], crs=ccrs.PlateCarree())
    ax.add_feature(cfeature.LAND)
    ax.add_feature(cfeature.OCEAN)
    ax.add_feature(cfeature.COASTLINE)
    ax.add_feature(cfeature.BORDERS)
    ax.add_feature(cfeature.LAKES, alpha=0.5)
    ax.add_feature(cfeature.STATES)
    columns = list(neuron_activations.columns[neuron_activations.columns.str.contains(col_type)])
    colors = sns.color_palette("deep", len(columns))
    for i, col in enumerate(columns):
        # Storms whose activation exceeds the chosen quantile for this column.
        data = neuron_activations[neuron_activations[col] > neuron_activations[col].quantile(quant_thresh)]
        var = data[col]
        plt.scatter(data['centroid_lon'], data['centroid_lat'], transform=ccrs.PlateCarree(),
                    label=None, color=colors[i], alpha=0.25, s=2.5)
        sns.kdeplot(data['centroid_lon'], data['centroid_lat'], data=var, levels=3,
                    transform=ccrs.PlateCarree(), linewidths=5, thresh=0, color=colors[i],
                    linestyles='--', label=f'{col.capitalize()} {i}')
    plt.legend(prop={'size': 16})
    plt.title(f'Storm Activations Above {quant_thresh} Quantile - {mode}', fontsize=30)
plt.savefig(join(output_path, file_name), dpi=300, bbox_inches='tight') def diurnal_neuron_activations(neuron_activations, output_path, mode, model_name, gmm_name=None, cluster=False, quant_thresh=0.99): """ Plot diurnal distribution of each neuron Args: neuron_activations: CSV file of neuron activations output_path: Base output path from config model_name: Model name from config mode: Data partition (train, val, test) quant_thresh: Quantile to select storms that exceed threshold Returns: """ if cluster: col_type = 'cluster' else: col_type = 'neuron' if gmm_name is not None: file_name = f'Diurnal_activations_{model_name}_{gmm_name}_{mode}.png' else: file_name = f'Diurnal_activations_{model_name}_{mode}.png' fig, ax = plt.subplots(figsize=(20, 8)) df = neuron_activations.copy() df.time = df.time.astype('datetime64[ns]').reset_index(drop=True) - pd.Timedelta(6, 'H') columns = list(neuron_activations.columns[neuron_activations.columns.str.contains(col_type)]) colors = sns.color_palette("deep", len(columns)) for i, col in enumerate(columns): data = df[df[col] > df[col].quantile(quant_thresh)].groupby(df['time'].dt.hour)[col].count() plt.plot(data, linewidth=4, alpha=1, label=col.capitalize(), color=colors[i]) plt.legend(prop={'size': 16}) plt.title(f'Diurnal Distribution of Storm Activations Above {quant_thresh} Quantile - {mode}', fontsize=30) ax.set_ylabel('Number of Storms', fontsize=20) ax.set_xlabel('UTC - 6', fontsize=20) ax.xaxis.set_tick_params(labelsize=16) ax.yaxis.set_tick_params(labelsize=16) plt.savefig(join(output_path, file_name), dpi=300, bbox_inches='tight') def plot_cluster_dist(data, output_path, cluster_type, n_cluster): """ Bar plot of clusters by percentage. 
Args: data: Neuron activation dataframe with cluster labels output_path: Output path to save to cluster_type: Cluster algorithm used for file naming n_cluster: Number of unique clusters in clustering algorithm Returns: """ plt.figure(figsize=(12, 6)) counts = Counter(data['label']) percent_counts = [x / sum(counts.values()) * 100 for x in counts.values()] sns.barplot(list(counts.keys()), percent_counts, palette='deep') plt.xlabel('Cluster', fontsize=14) plt.ylabel('Percent of Storms', fontsize=14) plt.savefig(join(output_path, f'{cluster_type}_{n_cluster}_dist.png'), bbox_inches='tight') return counts def plot_prob_dist(data, output_path, cluster_type, n_cluster): """ KDE plot of cluster probabilities. Args: data: Neuron activation dataframe with cluster labels output_path: Output path to save to cluster_type: Cluster algorithm used for file naming n_cluster: Number of unique clusters in clustering algorithm Returns: """ plt.figure(figsize=(12, 6)) sns.displot(data=data, x='label prob', hue='label', multiple='stack', palette='dark', kind='kde', aspect=2) plt.xlabel('Cluster Probability', fontsize=14) plt.ylabel('Density', fontsize=14) plt.savefig(join(output_path, f'{cluster_type}_{n_cluster}_prob_dist.png'), bbox_inches='tight') def plot_prob_cdf(data, output_path, cluster_type, n_cluster): """ Plot CDF of cluster label probabilities. 
Args: data: Neuron activation dataframe with cluster labels output_path: Output path to save to cluster_type: Cluster algorithm used for file naming n_cluster: Number of unique clusters in clustering algorithm Returns: """ sns.displot(data['label prob'], kind='ecdf', height=12) plt.xlabel('Cluster Probability', fontsize=14) plt.ylabel('Density', fontsize=14) plt.savefig(join(output_path, f'{cluster_type}_{n_cluster}_prob_cdf.png'), bbox_inches='tight') def plot_storm_clusters(patch_data_path, output_path, cluster_data, cluster_method, seed, n_storms=25, prob_type='highest'): """ Args: patch_data_path: Path where storm patches are located. output_path: Output path to save file. cluster_data: Neuron activation dataframe with cluster labels cluster_method: Cluster algorithm used for file naming seed: Random seed used for sampling. n_storms: Number of storms to plot per cluster (should be an even square - 4, 9. 16, 25, ...) prob_type: Probability of storms to plot (only valid for 'GMM'). Should be 'highest', 'lowest', or 'random' Returns: """ ms_mph = 2.237 file_dates = sorted(pd.to_datetime(cluster_data['run_date'].unique())) file_paths = sorted( [join(patch_data_path, f'NCARSTORM_{x.strftime("%Y%m%d")}-0000_d01_model_patches.nc') for x in file_dates]) ds = xr.open_mfdataset(file_paths, combine='nested', concat_dim='p') wind_slice = (slice(8, None, 12), slice(8, None, 12)) x_mesh, y_mesh = np.meshgrid(range(len(ds['row'])), range(len(ds['col']))) n_clusters = cluster_data['cluster'].nunique() for cluster in range(n_clusters): if cluster_method == 'Spectral': sub = cluster_data[cluster_data['cluster'] == cluster].sample(n_storms, random_state=seed) elif cluster_method == 'GMM': if prob_type ==
# -*- coding: utf-8 -*- """ Test the RescaleToBound class. """ import numpy as np import pytest from unittest.mock import MagicMock, call, create_autospec, patch from nessai.reparameterisations import RescaleToBounds from nessai.livepoint import get_dtype, numpy_array_to_live_points @pytest.fixture def reparam(): return create_autospec(RescaleToBounds) @pytest.fixture() def reparameterisation(model): def _get_reparameterisation(kwargs): return RescaleToBounds(parameters=model.names, prior_bounds=model.bounds, **kwargs) return _get_reparameterisation @pytest.fixture(scope='function') def is_invertible(model, n=100): def test_invertibility(reparam, model=model, decimal=16): x = model.new_point(N=n) x_prime = np.zeros([n], dtype=get_dtype(reparam.prime_parameters)) log_j = np.zeros(n) assert x.size == x_prime.size x_re, x_prime_re, log_j_re = reparam.reparameterise( x, x_prime, log_j) x_in = np.zeros([x_re.size], dtype=get_dtype(reparam.parameters)) log_j = np.zeros(x_re.size) x_inv, x_prime_inv, log_j_inv = \ reparam.inverse_reparameterise(x_in, x_prime_re, log_j) m = x_re.size // n for i in range(m): start, end = (i * n), (i + 1) * n for name in x.dtype.names: np.testing.assert_array_almost_equal( x[name], x_re[name][start:end], decimal=decimal, ) np.testing.assert_array_almost_equal( x[name], x_inv[name][start:end], decimal=decimal, ) for name in x_prime.dtype.names: np.testing.assert_array_almost_equal( x_prime_re[name], x_prime_inv[name], decimal=decimal, ) np.testing.assert_array_almost_equal( log_j_re, -log_j_inv, decimal=decimal ) return True return test_invertibility @pytest.mark.parametrize( "input, expected_value", [ (None, {'x': [-1, 1], 'y': [-1, 1]}), ([0, 1], {'x': [0, 1], 'y': [0, 1]}), ({'x': [0, 1], 'y': [-1, 1]}, {'x': [0, 1], 'y': [-1, 1]}), ] ) def test_rescale_bounds_config(reparam, input, expected_value): """Assert the rescale bounds are set correctly.""" RescaleToBounds.__init__( reparam, parameters=['x', 'y'], prior_bounds={'x': [-1, 1], 'y': 
[0, 1]}, rescale_bounds=input, ) assert reparam.rescale_bounds == expected_value def test_rescale_bounds_dict_missing_params(reparam): """Assert an error is raised if the rescale_bounds dict is missing a parameter. """ with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.__init__( reparam, parameters=['x', 'y'], prior_bounds={'x': [-1, 1], 'y': [0, 1]}, rescale_bounds={'x': [0, 1]} ) assert 'Missing rescale bounds for parameters' in str(excinfo.value) def test_rescale_bounds_incorrect_type(reparam): """Assert an error is raised if the rescale_bounds is an invalid type.""" with pytest.raises(TypeError) as excinfo: RescaleToBounds.__init__( reparam, parameters=['x', 'y'], prior_bounds={'x': [-1, 1], 'y': [0, 1]}, rescale_bounds=1, ) assert 'must be an instance of list or dict' in str(excinfo.value) @pytest.mark.parametrize( "input, expected_value", [ (True, {'x': 'split', 'y': 'split'}), (['x'], {'x': 'split'}), ({'x': 'split'}, {'x': 'split'}), ] ) def test_boundary_inversion_config(reparam, input, expected_value): """Assert the boundary inversion dict is set correctly""" RescaleToBounds.__init__( reparam, parameters=['x', 'y'], prior_bounds={'x': [0, 1], 'y': [0, 1]}, boundary_inversion=input, ) assert reparam.boundary_inversion == expected_value def test_boundary_inversion_invalid_type(reparam): """Assert an error is raised in the type is invalid""" with pytest.raises(TypeError) as excinfo: RescaleToBounds.__init__( reparam, parameters='x', prior_bounds=[0, 1], boundary_inversion='Yes', ) assert 'boundary_inversion must be a list, dict or bool' \ in str(excinfo.value) def test_detect_edges_without_inversion(reparam): """Assert detect edges cannot be used with boundary inversion""" with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.__init__( reparam, parameters=['x', 'y'], prior_bounds={'x': [-1, 1], 'y': [0, 1]}, detect_edges=True, ) assert 'Must enable boundary inversion to use detect edges' \ in str(excinfo.value) def test_set_bounds(reparam): 
"""Test the set bounds method.""" reparam.parameters = ['x'] reparam.rescale_bounds = {'x': np.array([-1, 1])} reparam.pre_rescaling = lambda x: (x / 2, np.zeros_like(x)) reparam.offsets = {'x': 1} RescaleToBounds.set_bounds(reparam, {'x': np.array([-10, 10])}) np.testing.assert_array_equal(reparam.pre_prior_bounds['x'], [-5, 5]) np.testing.assert_array_equal(reparam.bounds['x'], [-6, 4]) def test_set_offets(reparam): """Assert the offset are set correctly""" reparam.pre_rescaling = lambda x: (x / 2, 0.0) RescaleToBounds.__init__( reparam, parameters=['x', 'y'], prior_bounds={'x': [8, 32], 'y': [2, 4]}, offset=True, ) assert reparam.offsets == {'x': 10.0, 'y': 1.5} def test_reset_inversion(reparam): """Assert the edges are reset correctly""" reparam.parameters = ['x', 'y'] reparam._edges = {'x': [-10, 10], 'y': [-5, 5]} RescaleToBounds.reset_inversion(reparam) assert reparam._edges == {'x': None, 'y': None} def test_x_prime_log_prior_error(reparam): """Assert an error is raised if the prime prior is not defined.""" reparam.has_prime_prior = False with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.x_prime_log_prior(reparam, 0.1) assert 'Prime prior is not configured' in str(excinfo.value) def test_default_pre_rescaling(reparam): """Assert the default pre-rescaling is the identity""" x = np.array([1, 2, 3]) expected_log_j = np.zeros(3) x_out, log_j = RescaleToBounds.pre_rescaling(reparam, x) x_out_inv, log_j_inv = RescaleToBounds.pre_rescaling_inv(reparam, x) np.testing.assert_array_equal(x_out, x) np.testing.assert_array_equal(x_out_inv, x) np.testing.assert_array_equal(log_j, expected_log_j) np.testing.assert_array_equal(log_j_inv, expected_log_j) def test_default_post_rescaling(reparam): """Assert the default post-rescaling is the identity""" x = np.array([1, 2, 3]) expected_log_j = np.zeros(3) x_out, log_j = RescaleToBounds.post_rescaling(reparam, x) x_out_inv, log_j_inv = RescaleToBounds.post_rescaling_inv(reparam, x) 
np.testing.assert_array_equal(x_out, x) np.testing.assert_array_equal(x_out_inv, x) np.testing.assert_array_equal(log_j, expected_log_j) np.testing.assert_array_equal(log_j_inv, expected_log_j) def test_configure_pre_rescaling_none(reparam): """Test the configuration of the pre-rescaling if it is None""" RescaleToBounds.configure_pre_rescaling(reparam, None) assert reparam.has_pre_rescaling is False def test_configure_post_rescaling_none(reparam): """Test the configuration of the post-rescaling if it is None""" RescaleToBounds.configure_post_rescaling(reparam, None) assert reparam.has_post_rescaling is False def test_pre_rescaling_with_functions(reparam): """Assert that specifying functions works as intended""" rescaling = (np.exp, np.log) RescaleToBounds.configure_pre_rescaling(reparam, rescaling) assert reparam.has_pre_rescaling is True assert reparam.pre_rescaling is np.exp assert reparam.pre_rescaling_inv is np.log def test_post_rescaling_with_functions(reparam): """Assert that specifying functions works as intended""" rescaling = (np.exp, np.log) RescaleToBounds.configure_post_rescaling(reparam, rescaling) assert reparam.has_post_rescaling is True assert reparam.has_prime_prior is False assert reparam.post_rescaling is np.exp assert reparam.post_rescaling_inv is np.log def test_pre_rescaling_with_str(reparam): """Assert that specifying a str works as intended""" from nessai.utils.rescaling import rescaling_functions rescaling = 'logit' RescaleToBounds.configure_pre_rescaling(reparam, rescaling) assert reparam.has_pre_rescaling is True assert reparam.pre_rescaling is rescaling_functions['logit'][0] assert reparam.pre_rescaling_inv is rescaling_functions['logit'][1] def test_pre_rescaling_with_invalid_str(reparam): """Assert an error is raised if the rescaling is not recognised""" rescaling = 'not_a_rescaling' with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.configure_pre_rescaling(reparam, rescaling) assert 'Unknown rescaling function: 
not_a_rescaling' in str(excinfo.value) def test_post_rescaling_with_invalid_str(reparam): """Assert an error is raised if the rescaling is not recognised""" rescaling = 'not_a_rescaling' with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.configure_post_rescaling(reparam, rescaling) assert 'Unknown rescaling function: not_a_rescaling' in str(excinfo.value) def test_post_rescaling_with_str(reparam): """Assert that specifying a str works as intended. Also test the config for the logit """ reparam._update_bounds = False reparam.parameters = ['x'] from nessai.utils.rescaling import rescaling_functions rescaling = 'logit' RescaleToBounds.configure_post_rescaling(reparam, rescaling) assert reparam.has_post_rescaling is True assert reparam.has_prime_prior is False assert reparam.post_rescaling is rescaling_functions['logit'][0] assert reparam.post_rescaling_inv is rescaling_functions['logit'][1] assert reparam.rescale_bounds == {'x': [0, 1]} def test_post_rescaling_with_logit_update_bounds(reparam): """Assert an error is raised if using logit and update bounds""" reparam._update_bounds = True rescaling = 'logit' with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.configure_post_rescaling(reparam, rescaling) assert 'Cannot use logit with update bounds' in str(excinfo.value) def test_pre_rescaling_invalid_input(reparam): """Assert an error is raised if the input isn't a str or tuple""" with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.configure_pre_rescaling(reparam, (np.exp, )) assert 'Pre-rescaling must be a str or tuple' in str(excinfo.value) def test_post_rescaling_invalid_input(reparam): """Assert an error is raised if the input isn't a str or tuple""" with pytest.raises(RuntimeError) as excinfo: RescaleToBounds.configure_post_rescaling(reparam, (np.exp, )) assert 'Post-rescaling must be a str or tuple' in str(excinfo.value) def test_update_bounds_disabled(reparam, caplog): """Assert nothing happens in _update_bounds is False""" 
caplog.set_level('DEBUG') reparam._update_bounds = False RescaleToBounds.update_bounds(reparam, [0, 1]) assert 'Update bounds not enabled' in str(caplog.text) def test_update_bounds(reparam): """Assert the correct values are returned""" reparam.offsets = {'x': 0.0, 'y': 1.0} reparam.pre_rescaling = MagicMock( side_effect=lambda x: (x, np.zeros_like(x)) ) reparam.parameters = ['x', 'y'] x = {'x': [-1, 0, 1], 'y': [-2, 0, 2]} RescaleToBounds.update_bounds(reparam, x) reparam.update_prime_prior_bounds.assert_called_once() reparam.pre_rescaling.assert_has_calls( [call(-1), call(1), call(-2), call(2)] ) assert reparam.bounds == {'x': [-1, 1], 'y': [-3, 1]} def test_reparameterise(reparam): """Test the reparameterise function""" reparam.has_pre_rescaling = False reparam.has_post_rescaling = False reparam.parameters = ['x'] reparam.prime_parameters = ['x_prime'] reparam.offsets = {'x': 1.0} reparam.boundary_inversion = {} x = numpy_array_to_live_points( np.array([(1.0,), (2.0,)]), reparam.parameters ) x_prime_in = np.zeros([2, ], dtype=get_dtype(reparam.prime_parameters)) x_prime_val = np.array([0.0, 0.5]) log_j = np.zeros(x.size) reparam._rescale_to_bounds = MagicMock( return_value=(x_prime_val, np.array([0, 0.5])) ) x_out, x_prime_out, log_j_out = \ RescaleToBounds.reparameterise(reparam, x, x_prime_in, log_j) np.testing.assert_array_equal( np.array([0.0, 1.0]), reparam._rescale_to_bounds.call_args_list[0][0][0] ) assert reparam._rescale_to_bounds.call_args_list[0][0][1] == 'x' np.testing.assert_array_equal(x, x_out) np.testing.assert_array_equal(x_prime_out['x_prime'], x_prime_val) np.testing.assert_array_equal(log_j_out, np.array([0.0, 0.5])) def test_inverse_reparameterise(reparam): """Test the inverse_reparameterise function""" reparam.has_pre_rescaling = False reparam.has_post_rescaling = False reparam.parameters = ['x'] reparam.prime_parameters = ['x_prime'] reparam.offsets = {'x': 1.0} reparam.boundary_inversion = {} x_prime = numpy_array_to_live_points( 
np.array([(1.0,), (2.0,)]), reparam.prime_parameters ) x_in = np.zeros([2, ], dtype=get_dtype(reparam.parameters)) x_val = np.array([0.0, 0.5]) log_j = np.zeros(x_prime.size) reparam._inverse_rescale_to_bounds = MagicMock( return_value=(x_val, np.array([0, 0.5])) ) x_out, x_prime_out, log_j_out = \ RescaleToBounds.inverse_reparameterise(reparam, x_in, x_prime, log_j) # x[p] is updated in place, can't test inputs reparam._inverse_rescale_to_bounds.assert_called_once() assert \ reparam._inverse_rescale_to_bounds.call_args_list[0][0][1] == 'x' np.testing.assert_array_equal(x_prime_out, x_prime) np.testing.assert_array_equal(x_out['x'], x_val + 1.0) np.testing.assert_array_equal(log_j_out, np.array([0.0, 0.5])) def test_reparameterise_boundary_inversion(reparam): """Test the reparameterise function with boundary inversion""" reparam.has_pre_rescaling = False reparam.has_post_rescaling = False reparam.parameters = ['x'] reparam.prime_parameters = ['x_prime'] reparam.offsets = {'x': 1.0} reparam.boundary_inversion = {'x': 'split'} x = numpy_array_to_live_points( np.array([(1.0,), (2.0,)]), reparam.parameters ) inversion_out = numpy_array_to_live_points( np.array([(-1.0,), (-2.0,), (1.0,), (2.0,)]), reparam.prime_parameters ) x_prime_in = np.zeros([2, ], dtype=get_dtype(reparam.prime_parameters)) log_j = np.zeros(x.size) x_ex = np.concatenate([x, x]) x_prime_ex = inversion_out log_j_ex = np.array([0, 0.5, 0, 0.5]) reparam._apply_inversion = MagicMock( return_value=(x_ex, x_prime_ex, log_j_ex) ) x_out, x_prime_out, log_j_out = RescaleToBounds.reparameterise( reparam, x, x_prime_in, log_j, compute_radius=True, test='test', ) np.testing.assert_array_equal( reparam._apply_inversion.call_args_list[0][0][0], x ) np.testing.assert_array_equal( reparam._apply_inversion.call_args_list[0][0][1], x_prime_in, ) np.testing.assert_array_equal( reparam._apply_inversion.call_args_list[0][0][2], log_j ) assert reparam._apply_inversion.call_args_list[0][0][3] == 'x' assert 
reparam._apply_inversion.call_args_list[0][0][4] == 'x_prime' assert reparam._apply_inversion.call_args_list[0][0][5] is True assert reparam._apply_inversion.call_args_list[0][1] == {'test': 'test'} np.testing.assert_array_equal(x_out, x_ex) np.testing.assert_array_equal(x_prime_out, x_prime_ex) np.testing.assert_array_equal(log_j_out, log_j_ex) def test_inverse_reparameterise_boundary_inversion(reparam): """Test the inverse_reparameterise function with boundary inversion""" reparam.has_pre_rescaling = False reparam.has_post_rescaling = False reparam.parameters = ['x'] reparam.prime_parameters = ['x'] reparam.offsets =
<filename>exp1_bot_detection/SATAR_FT/SATAR_FT.py<gh_stars>1-10 import torch import numpy import math import pandas as pd import torch.autograd as autograd import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import os import random import torch.nn.utils.rnn as rnn from torch.utils.data import Dataset, DataLoader import argparse import torch.distributed as dist import time # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~model subclass~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Attention Mechanism in a nutshell # Input : impending vectors of size vector_num * vector_size # Output : Attentioned representation of size vector_size class Attention(nn.Module): def __init__(self, vector_size): super(Attention, self).__init__() self.vector_size = vector_size self.fc = nn.Linear(vector_size, vector_size) # self.fc.bias.data.fill_(0) self.weightparam = nn.Parameter(torch.randn(vector_size, 1)) def forward(self, vectors): # hidden = torch.tanh(self.fc(vectors)) # print(hidden) weight = torch.tanh(self.fc(vectors)).matmul(self.weightparam) # del hidden # torch.cuda.empty_cache() # torch.cuda.synchronize() weight = F.softmax(weight, dim=0) # print(weight) rep = vectors.mul(weight) # del weight torch.cuda.empty_cache() torch.cuda.synchronize() rep = rep.sum(dim=0) return rep # Model for word-level semantics extraction (2 initializations: 1 WordLevel(batch_size of 1) 1 TweetLevelLow) # Input : sequence of size seq_len * batch_size (maybe a result of rnn.pad_sequence?) 
# Output : seq_len * batch_size * rep_size class SemanticWord(nn.Module): def __init__(self, embedding_dim, rep_size, batch_size, num_layer, embed_layer, p): super(SemanticWord, self).__init__() self.hidden_dim = int(rep_size / 2) self.embedding_dim = embedding_dim self.rep_size = rep_size self.batch_size = batch_size self.num_layer = num_layer self.word_embeddings = embed_layer self.word_embeddings.weight.requires_grad = False self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim, bidirectional=True, dropout=p, num_layers=num_layer) # self.hidden = self.init_hidden() # self.hidden = (self.hidden[0].type(torch.FloatTensor).cuda(non_blocking=True), self.hidden[1].type(torch.FloatTensor).cuda(non_blocking=True)) # self.register_buffer('hidden', self.hidden) def init_hidden(self, batch_size): # torch.cuda.FloatTensor(1000, 1000).fill_(0) temp = (torch.cuda.FloatTensor(2 * self.num_layer, batch_size, self.hidden_dim).fill_(0), torch.cuda.FloatTensor(2 * self.num_layer, batch_size, self.hidden_dim).fill_(0)) return temp # return (temp[0].type(torch.FloatTensor).cuda(non_blocking=True), temp[1].type(torch.FloatTensor).cuda(non_blocking=True)) def forward(self, text): sim_batch_size = 8 batch_size = len(text[0]) if batch_size <= sim_batch_size: self.hidden = self.init_hidden(batch_size) # text = text[0:text.detach().tolist().index(tdict['b_b'])] tmp = {i for i in range(len(text)) if text[i].item() == tdict['b_b']} tmp = list(set(range(len(text))) - tmp) if not len(tmp): tmp = [0] text = text[tmp] result = self.word_embeddings(text) result = result.clone().view(len(text), batch_size, -1).cuda(non_blocking=True) result, _ = self.lstm(result, self.hidden) del self.hidden torch.cuda.empty_cache() torch.cuda.synchronize() return result else: now = 0 tmp = [] # print('batch_size: ' + str(batch_size)) while True: now_text = text[:, now:min(now + sim_batch_size, batch_size)] now_batch_size = len(now_text[0]) # print('now batch size: ' + str(now_batch_size)) self.hidden = 
self.init_hidden(now_batch_size) result = self.word_embeddings(now_text) result = result.clone().view(len(now_text), now_batch_size, -1).cuda(non_blocking=True) result, _ = self.lstm(result, self.hidden) # del self.hidden del now_text torch.cuda.empty_cache() torch.cuda.synchronize() tmp.append(result) if now + sim_batch_size >= batch_size: break now += sim_batch_size tmp = torch.cat(tmp, dim=1) torch.cuda.empty_cache() torch.cuda.synchronize() # print('before attention: ', tmp.size()) return tmp # batch_size = len(text[0]) # self.hidden = self.init_hidden(batch_size) # text = text[0:text.detach().tolist().index(tdict['b_b'])] # if batch_size == 1: # tmp = {i for i in range(len(text)) if text[i].item() == tdict['b_b']} # tmp = list(set(range(len(text))) - tmp) # if not len(tmp): # tmp = [0] # text = text[tmp] # result = self.word_embeddings(text) # result = result.clone().view(len(text), batch_size, -1).cuda(non_blocking=True) # result, _ = self.lstm(result, self.hidden) # del self.hidden # return result # Model for tweet-level semantics extraction from tweet vectors # Input : sequence of tweet vectors of a single user of size vector_num * 1 * tweet_vec_size # Output : vector_num * rep_size class SemanticTweet(nn.Module): def __init__(self, input_dim, rep_size, num_layer, p): super(SemanticTweet, self).__init__() self.hidden_dim = int(rep_size / 2) self.input_dim = input_dim self.rep_size = rep_size self.batch_size = 1 self.num_layer = num_layer self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, bidirectional=True, dropout=p, num_layers=num_layer) self.hidden = self.init_hidden() # self.hidden = (self.hidden[0].type(torch.FloatTensor).cuda(non_blocking=True), self.hidden[1].type(torch.FloatTensor).cuda(non_blocking=True)) # self.register_buffer('hidden', self.hidden) def init_hidden(self): temp = (torch.cuda.FloatTensor(2 * self.num_layer, self.batch_size, self.hidden_dim).fill_(0), torch.cuda.FloatTensor(2 * self.num_layer, self.batch_size, 
self.hidden_dim).fill_(0)) return temp # return (temp[0].type(torch.FloatTensor).cuda(non_blocking=True), temp[1].type(torch.FloatTensor).cuda(non_blocking=True)) def forward(self, vectors): self.hidden = self.init_hidden() # result = vectors.clone().view(len(vectors), self.batch_size, -1) # vectors = vectors.cuda(non_blocking = True) result, _ = self.lstm(vectors, self.hidden) result = result.squeeze(1) # del self.hidden return result # Aggregated semantic model # Input : user dict {'word' : torch.tensor([1,2,3,...]), 'tweet': tensor of size tweet_cnt * tweet_len} # Output : overall semantic representation of size rep_size class SemanticVector(nn.Module): def __init__(self, embedding_dim, rep_size, num_layer, dropout, embed_layer): super(SemanticVector, self).__init__() self.embedding_dim = embedding_dim self.rep_size = rep_size self.num_layer = num_layer self.dropout = dropout # self.embed_layer = embed_layer # self.embed_layer.weight.requires_grad = False self.WordLevelModel = SemanticWord(embedding_dim=self.embedding_dim, rep_size=int(self.rep_size / 2), batch_size=1, num_layer=self.num_layer, embed_layer=embed_layer, p=self.dropout) self.TweetLowModel = SemanticWord(embedding_dim=self.embedding_dim, rep_size=int(self.rep_size / 2), batch_size=1, num_layer=self.num_layer, embed_layer=embed_layer, p=self.dropout) self.TweetHighModel = SemanticTweet(input_dim=int(self.rep_size / 2), rep_size=int(self.rep_size / 2), num_layer=self.num_layer, p=dropout) self.WordAttention = Attention(vector_size=int(self.rep_size / 2)) self.TweetLowAttention = Attention(vector_size=int(self.rep_size / 2)) self.TweetHighAttention = Attention(vector_size=int(self.rep_size / 2)) def forward(self, user): text_word = user['word'] # text_word = text_word.unsqueeze(1) WordLevelRep = self.WordAttention(self.WordLevelModel(text_word.unsqueeze(1)).squeeze(1)) del text_word torch.cuda.empty_cache() torch.cuda.synchronize() text_tweet = user['tweet'] # one tweet each row # TweetRep = [] # for 
i in range(len(text_tweet)): # TweetRep.append( # self.TweetLowAttention(self.TweetLowModel(text_tweet[i, :].unsqueeze(1)).squeeze(1)).tolist()) TweetRep = self.TweetLowAttention(self.TweetLowModel(text_tweet.transpose(0, 1))) del text_tweet # print('user tweet low finish') torch.cuda.empty_cache() torch.cuda.synchronize() # vec_tweet = torch.tensor(TweetRep).unsqueeze(1) TweetLevelRep = self.TweetHighAttention(self.TweetHighModel(TweetRep.unsqueeze(1))) # del TweetRep torch.cuda.empty_cache() torch.cuda.synchronize() # print('a user semantic finish') return torch.cat((WordLevelRep, TweetLevelRep)) # Model for transforming the properties vector # Input : property vectors of size vector_num * input_size # Output : representations of size vector_num * rep_size class Properties(nn.Module): def __init__(self, input_size, rep_size, dropout): super(Properties, self).__init__() self.input_size = input_size self.rep_size = rep_size # self.fc1 = nn.Linear(self.input_size, self.input_size) # self.fc2 = nn.Linear(self.input_size, self.rep_size) # self.fc3 = nn.Linear(self.rep_size, self.rep_size) # self.fc1.bias.data.fill_(0) # self.fc2.bias.data.fill_(0) # self.fc3.bias.data.fill_(0) # self.act1 = nn.ReLU() # self.act2 = nn.ReLU() # self.act3 = nn.ReLU() # self.dropout1 = nn.Dropout(p=dropout) # self.dropout2 = nn.Dropout(p=dropout) self.fc = nn.Linear(self.input_size, self.rep_size) self.act = nn.ReLU() def forward(self, vectors): # vectors = self.dropout1(self.act1(self.fc1(vectors))) # vectors = self.dropout2(self.act2(self.fc2(vectors))) # vectors = self.act3(self.fc3(vectors)) vectors = self.act(self.fc(vectors)) return vectors # Co attention model # Input : user {'semantic' : batch_size * 1 * vec_size, 'property' : batch_size * 1 * vec_size, 'neighbor' : batch_size * 1 * (2 * vec_size)} # Output : final representation of size batch_size * 1 * vec_size class CoAttention(nn.Module): def __init__(self, vec_size): super(CoAttention, self).__init__() self.vec_size = 
vec_size self.Wsp = nn.Parameter(torch.randn(self.vec_size, self.vec_size)) self.Wpn = nn.Parameter(torch.randn(self.vec_size, 2 * self.vec_size)) self.Wns = nn.Parameter(torch.randn(2 * self.vec_size, self.vec_size)) self.Ws = nn.Parameter(torch.randn(self.vec_size, self.vec_size)) self.Wp = nn.Parameter(torch.randn(self.vec_size, self.vec_size)) self.Wn = nn.Parameter(torch.randn(self.vec_size, 2 * self.vec_size)) self.Wh = nn.Parameter(torch.randn(3 * self.vec_size, self.vec_size)) self.fc = nn.Linear(2 * vec_size, 2 * vec_size) self.act = nn.ReLU() def forward(self, user): # Vs Vp Vn tensor of size vec_size x 1 Vs = torch.transpose(user['semantic'], 1, 2) Vp = torch.transpose(user['property'], 1, 2) Vn = torch.transpose(self.act(self.fc(user['neighbor'])), 1, 2) #print('Vs size ', Vs.size()) #print('Vp size ', Vp.size()) #print('Vn size ', Vn.size()) #print('Wsp size ', Wsp.size()) Fsp = torch.tanh(torch.transpose(Vs, 1, 2).matmul(self.Wsp).matmul(Vp)) Fpn = torch.tanh(torch.transpose(Vp, 1, 2).matmul(self.Wpn).matmul(Vn)) Fns = torch.tanh(torch.transpose(Vn, 1, 2).matmul(self.Wns).matmul(Vs)) Hs = torch.tanh(self.Ws.matmul(Vs) + self.Wp.matmul(Vp) * Fsp + self.Wn.matmul(Vn) * Fns) Hp = torch.tanh(self.Wp.matmul(Vp) + self.Ws.matmul(Vs) * Fsp + self.Wn.matmul(Vn) * Fpn) Hn = torch.tanh(self.Wn.matmul(Vn) + self.Ws.matmul(Vs) * Fns + self.Wp.matmul(Vp) * Fpn) # V = torch.cat((torch.cat((Vs, Vp)), Vn)) H = torch.cat((torch.cat((Hs, Hp), dim = 1), Hn), dim = 1) # rep = torch.cat((V, H)) result = torch.tanh(torch.transpose(H, 1, 2).matmul(self.Wh)) del Vs,Vp,Vn,Fsp,Fpn,Fns,Hs,Hp,Hn,H return result # Model to predict follower from vector representation # Input : user representation of size vector_num * vec_size # Output : log softmax of size vector_num * label_size class FollowerClassification(nn.Module): def __init__(self, vec_size, label_size, dropout): super(FollowerClassification, self).__init__() self.vec_size = vec_size self.label_size = label_size # self.fc1 
= nn.Linear(self.vec_size, self.vec_size) self.fc2 = nn.Linear(self.vec_size, self.label_size) # self.fc1.bias.data.fill_(0) # self.fc2.bias.data.fill_(0) # self.act1 = nn.ReLU() # self.dropout1 = nn.Dropout(p=dropout) def forward(self, vector): # result = self.dropout1(self.act1(self.fc1(vector))) # print(result.size()) result = F.log_softmax(self.fc2(vector), dim=1) return result # Model to predict a batch of users' followers # Input : a batch of users via DataLoader, user information specified in forward() annotation # Output : a batch of users' follower classification(log_softmax) of size batch_size * label_size ; # semantic repersentation of list size batch_size * rep_size # property representation of tensor size batch_size * rep_size class ModelBatch(nn.Module): def __init__(self, EMBEDDING_DIM, REP_SIZE, NUM_LAYER, DROPOUT, EMBED_LAYER, PROPERTY_SIZE, LABEL_SIZE): super(ModelBatch, self).__init__() self.SemanticModel = SemanticVector(embedding_dim=EMBEDDING_DIM, rep_size=REP_SIZE, num_layer=NUM_LAYER, dropout=DROPOUT, embed_layer=EMBED_LAYER) self.PropertyModel = Properties(input_size=PROPERTY_SIZE, dropout=DROPOUT, rep_size=REP_SIZE) self.CoAttentionModel = CoAttention(vec_size=REP_SIZE) self.FollowerPredictModel = FollowerClassification(vec_size=REP_SIZE, label_size=LABEL_SIZE, dropout=DROPOUT) def forward(self, user_batch): # each user shall contain # 'word' : torch.tensor([1,2,3,..]) # 'tweet' : tensor of size tweet_cnt * tweet_len # 'property' : tensor of size 1 * PROPERTY_SIZE # 'neighbor' : tensor of size 1 * REP_SIZE # semantic vector extraction semantic_reps = [] for i in range(len(user_batch['word'])): semantic_reps.append(self.SemanticModel({'word': user_batch['word'][i], 'tweet': user_batch['tweet'][i]})) # print('semantic finish') # property vector extraction property_reps = self.PropertyModel(user_batch['property']) #
0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.139305, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 4.58383, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.20269, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 1.01201e-05, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.151775, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.244808, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.123571, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.520154, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.173585, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.16253, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.91191e-06, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00636614, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0460364, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0470815, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.0460384, 'Execution Unit/Register Files/Runtime Dynamic': 0.0534476, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0969866, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.248685, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 1.43035, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00220604, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch 
Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00220604, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00198473, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000802925, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction 
Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000676329, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00707315, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0188908, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0452606, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.87897, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.170451, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.153726, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 5.2372, 'Instruction Fetch Unit/Runtime Dynamic': 0.395401, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0352485, 'L2/Runtime Dynamic': 0.00747208, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 2.59946, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.662885, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0440749, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.044075, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 2.80759, 'Load Store Unit/Runtime Dynamic': 0.924323, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.108681, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.217363, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store 
Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0385713, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0389476, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.179004, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0283965, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.401371, 'Memory Management Unit/Runtime Dynamic': 0.0673441, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 16.2334, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 4.49838e-06, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.00684774, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End 
RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0769513, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
<reponame>MosHumanoid/bitbots_thmos_meta<gh_stars>0
#!/usr/bin/python3
# Interactive hardware/motion checker for the robot: starts the motion stack,
# then walks an operator through diagnostics, topic rates, foot pressure,
# servo PWM, fall detection, walking and kicking checks.
import os
import rospy
import rosnode
import roslaunch
import rospkg
import rostopic
from bitbots_msgs.msg import FootPressure
from diagnostic_msgs.msg import DiagnosticStatus
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty
import actionlib
from bitbots_msgs.msg import KickGoal, KickAction, KickFeedback
from geometry_msgs.msg import Vector3, Quaternion
from tf.transformations import quaternion_from_euler
import dynamic_reconfigure.client
from sensor_msgs.msg import JointState, Imu


class bcolors:
    # ANSI escape codes for colored terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def print_warn(str):
    """Print `str` in warning color. NOTE(review): shadows builtin `str`."""
    print(bcolors.WARNING + str + bcolors.ENDC)


def print_info(str):
    """Print `str` in info color (takes exactly one positional argument)."""
    print(bcolors.OKGREEN + str + bcolors.ENDC)


def input_info(str):
    """Prompt the operator in info color and return their input."""
    return input(bcolors.OKBLUE + str + bcolors.ENDC)


# Latest top-level diagnostic level and sticky flags for any level ever seen.
diag_status = None
had_diag_error = False
had_diag_stale = False
had_diag_warn = False


def diagnostic_cb(msg: DiagnosticStatus):
    """Record the current diagnostic level and latch warn/error/stale flags."""
    global diag_status, had_diag_warn, had_diag_error, had_diag_stale
    diag_status = msg.level
    if msg.level == DiagnosticStatus.WARN:
        had_diag_warn = True
    elif msg.level == DiagnosticStatus.ERROR:
        had_diag_error = True
    elif msg.level == DiagnosticStatus.STALE:
        had_diag_stale = True


# Most recent filtered pressure message per foot.
left_pressure = None
right_pressure = None


def pressure_cb(msg, is_left=True):
    """Store the latest FootPressure message; `is_left` selects the foot
    (supplied via Subscriber callback_args below)."""
    global left_pressure
    global right_pressure
    if is_left:
        left_pressure = msg
    else:
        right_pressure = msg


def is_motion_started():
    """Return True when every node of the motion stack is running."""
    node_names = rosnode.get_node_names("/")
    started = True
    nodes_in_motion = {"/ros_control", "/hcm", "/walking", "/animation", "/dynamic_kick",
                       "/motion_odometry", "/odometry_fuser", "/DynupNode"}
    for node in nodes_in_motion:
        if not node in node_names:
            print_info(F"{node} not running")
            started = False
    return started


def check_pressure(msg, min, max, foot_name):
    """Check all four cleats of one foot against [min, max]; warn and return
    False on any violation.  NOTE(review): parameters shadow builtins
    `min`/`max`."""
    okay = True
    if msg.left_front < min or msg.left_front > max:
        print_warn(F" {foot_name} left_front out of limits. Min {min} Max {max} Value {round(msg.left_front, 2)}\n")
        okay = False
    if msg.left_back < min or msg.left_back > max:
        print_warn(F" {foot_name} left_back out of limits. Min {min} Max {max} Value {round(msg.left_back, 2)}\n")
        okay = False
    if msg.right_back < min or msg.right_back > max:
        print_warn(F" {foot_name} right_back out of limits. Min {min} Max {max} Value {round(msg.right_back, 2)}\n")
        okay = False
    if msg.right_front < min or msg.right_front > max:
        print_warn(F" {foot_name} right_front out of limits. Min {min} Max {max} Value {round(msg.right_front, 2)}\n")
        okay = False
    return okay


# Latched once any servo PWM (effort) message arrives.
got_pwm = False


def pwm_callback(msg: JointState):
    """Latch PWM reception and warn when any servo reports the max value (100)."""
    global got_pwm
    got_pwm = True
    for effort in msg.effort:
        if effort == 100:
            print_warn("Servo reported max PWM value. It is working at its limit!\n")


# Plausibility limits for IMU readings; exceeding them degrades orientation
# estimation.
ACCEL_LIMIT = 35
ANGULAR_VEL_LIMIT = 10


def imu_callback(msg: Imu):
    """Warn when linear acceleration or angular velocity exceed the limits."""
    for accel in [msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z]:
        if abs(accel) > ACCEL_LIMIT:
            print_warn("IMU over accel limit! Orientation estimation will suffer.\n")
    for angular_vel in [msg.angular_velocity.x, msg.angular_velocity.y, msg.angular_velocity.z]:
        if abs(angular_vel) > ANGULAR_VEL_LIMIT:
            print_warn("IMU over angular vel limit! Orientation estimation will suffer.\n")


if __name__ == '__main__':
    print_info("### This script will check the robot hardware and motions. Please follow the instructions\n")
    rospy.init_node("checker")
    # start subscribers
    imu_sub = rospy.Subscriber("imu/data", Imu, imu_callback, tcp_nodelay=True)
    pwm_sub = rospy.Subscriber("servo_PWM", JointState, pwm_callback, tcp_nodelay=True)
    # start necessary software
    print_info("First the motion software will be started. Please hold the robot, turn on servo power and press enter.\n")
    input_info("Press Enter to continue...")
    uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
    roslaunch.configure_logging(uuid)
    rospack = rospkg.RosPack()
    launch = roslaunch.parent.ROSLaunchParent(uuid, [
        rospack.get_path('bitbots_bringup') + "/launch/motion_standalone.launch"])
    launch.start()
    rospy.sleep(5)
    # Wait until every motion node is up before probing anything.
    while True:
        if is_motion_started():
            rospy.sleep(5)
            break
        else:
            print_info("Waiting for software to be started \n")
            rospy.sleep(1)
    print_info("\n\n")
    # check diagnostic status
    print_info("Will check diagnostic status of robot.\n")
    diag_sub = rospy.Subscriber("diagnostics_toplevel_state", DiagnosticStatus, diagnostic_cb, tcp_nodelay=True)
    rospy.sleep(3)
    if not (had_diag_stale or had_diag_warn or had_diag_error):
        print_info(" Diagnostic status okay.")
    else:
        print_warn(" Diagnostics report problem. Please use rqt_monitor to investigate.")
        if had_diag_error:
            print_warn(" There were errors.")
        if had_diag_warn:
            print_warn(" There were warnings.")
        if had_diag_stale:
            print_warn(" There were stales.")
        input_info("press enter when the issue is resolved")
        # Reset the latches so the post-motion check below starts clean.
        had_diag_stale = False
        had_diag_warn = False
        had_diag_error = False
    print_info("\n\n")
    # check publishing frequency of imu, servos, pwm, goals, pressure, robot_state
    # the topics which will be checked with minimal publishing rate
    topics_to_check = {"imu/data": 900, "joint_states": 900, "robot_state": 900,
                       "foot_pressure_left/raw": 900, "foot_pressure_left/filtered": 900,
                       "foot_pressure_right/raw": 900, "foot_pressure_right/filtered": 900,
                       "core/vdxl": 9, "diagnostics_toplevel_state": 9}
    rts = []
    for topic in topics_to_check.keys():
        msg_class, real_topic, _ = rostopic.get_topic_class(topic)
        if real_topic is None or msg_class is None:
            print_warn(F"Problem with topic {topic}")
        else:
            rt = rostopic.ROSTopicHz(-1)
            rt_sub = rospy.Subscriber(topic, msg_class, rt.callback_hz, callback_args=topic, tcp_nodelay=True)
            rts.append((rt, rt_sub))
    print_info("Please wait a few seconds for publishing rates of topics to be evaluated\n")
    rospy.sleep(5)
    print_info("Topics have been evaluated:\n")
    # NOTE(review): when a topic failed get_topic_class above, it is skipped in
    # `rts` but not in this loop, so `i` can mis-align topics with their
    # ROSTopicHz instances; also `rate` may be None here, and
    # `round(None, 2)` in the warning f-string would raise TypeError.
    i = 0
    for topic in topics_to_check.keys():
        rate = rts[i][0].get_hz(topic)[0]
        if rate is None or rate < topics_to_check[topic]:
            print_warn(F" Low rate on Topic {topic}: {round(rate, 2)} \n")
        else:
            print_info(F" Okay rate Topic {topic}: {round(rate, 2)} \n")
        i += 1
    # check pressure values when robot in air
    print_info("\n\n")
    print_info("We will check the foot pressure sensors next\n")
    input_info("Please hold the robot in the air so that the feet don't touch the ground and press enter.")
    left_pressure_sub = rospy.Subscriber("foot_pressure_left/filtered", FootPressure, pressure_cb,
                                         callback_args=True, tcp_nodelay=True)
    right_pressure_sub = rospy.Subscriber("foot_pressure_right/filtered", FootPressure, pressure_cb,
                                          callback_args=False, tcp_nodelay=True)
    rospy.sleep(0.5)
    while (not left_pressure) and (not right_pressure) and (not rospy.is_shutdown()):
        rospy.loginfo_throttle(1, "Waiting to receive pressure msgs\n")
    print_info("Pressure messages received\n")
    # In the air, all cleats should read approximately zero.
    both_okay = True
    both_okay = both_okay and check_pressure(left_pressure, -1, 1, "left")
    both_okay = both_okay and check_pressure(right_pressure, -1, 1, "right")
    if not both_okay:
        print_warn("Pressure not correctly zero. Will try to call zero service.\n")
        # call zero service
        zero_l = rospy.ServiceProxy("foot_pressure_left/set_foot_zero", Empty)
        zero_r = rospy.ServiceProxy("foot_pressure_right/set_foot_zero", Empty)
        zero_l()
        zero_r()
        # wait and check again
        rospy.sleep(1)
        both_okay = True
        both_okay = both_okay and check_pressure(left_pressure, -1, 1, "left")
        both_okay = both_okay and check_pressure(right_pressure, -1, 1, "right")
        if both_okay:
            print_info("Pressures correct after calling zero service. You can continue normally.\n")
        else:
            print_warn("Pressures still wrong. Please investigate the problem.\n")
    # check pressure values when robot in walkready
    input_info("Please put the robot standing on the ground and press enter")
    both_okay = True
    both_okay = both_okay and check_pressure(left_pressure, 20, 40, "left")
    both_okay = both_okay and check_pressure(right_pressure, 20, 40, "right")
    if both_okay:
        print_info("Pressure seems to be okay\n")
    else:
        print_warn("Problem with pressure. "
                   "Please recalibrate the sensors using rosrun bitbots_ros_control pressure_calibaration\n")
    # check servo PWM status
    print_info("We will check the servo PWM status now. This gives information if any servo is going on maximum torque.\n")
    print_info("Will now try to activate PWM reading and wait till they are recieved.\n")
    dyn_reconf_client = dynamic_reconfigure.client.Client("/ros_control/servos", timeout=10.0)
    dyn_reconf_client.update_configuration({'read_pwm': True})
    while not got_pwm:
        rospy.sleep(0.1)
    print_info("Got PWM values, will continue.\n")
    # check fall front
    input_info("\nNext we will check front falling detection.\n "
               "Please let the robot fall on its belly, but hold it to prevent damage. "
               "The robot should perform a safety motion. Afterwards it should stand up by itself. "
               "Press enter when you're done.\n")
    input_info("\nNext we will check back falling detection.\n "
               "Please let the robot fall on its back, but hold it to prevent damage. "
               "The robot should perform a safety motion. Afterwards it should stand up by itself. "
               "Press enter when you're done.\n")
    # check walk motion
    walk_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
    text = input_info(
        "\nWe will check walking of the robot. After pressing enter, robot will start walking in different directions. "
        "It will stop when it is finished. Please make sure there is space and catch it if it falls. Press y if you want to check walking.")
    if text == "y":
        # Drive forward, backward, then sideways left/right, 5 s each.
        cmd_msg = Twist()
        cmd_msg.linear.x = 0.1
        walk_pub.publish(cmd_msg)
        rospy.sleep(5)
        cmd_msg.linear.x = -0.1
        walk_pub.publish(cmd_msg)
        rospy.sleep(5)
        cmd_msg.linear.x = 0.0
        cmd_msg.linear.y = 0.1
        walk_pub.publish(cmd_msg)
        rospy.sleep(5)
        cmd_msg.linear.y = -0.1
        walk_pub.publish(cmd_msg)
        rospy.sleep(5)
        cmd_msg.linear.y = 0
        walk_pub.publish(cmd_msg)
        rospy.sleep(1)
    # check kick motion
    text = input_info("\nWe will check the kick motion. Please hold make sure the robot is safe. "
                      "Press y if you want to perform this test.")
    if text == 'y':
        # NOTE(review): this callback looks broken as written — `GoalStatus`
        # and `sys` are not imported in this file, `print_info` accepts only
        # one positional argument (the `end=''`, two-arg and zero-arg calls
        # below would raise TypeError) — confirm before relying on it.
        def done_cb(state, result):
            print_info('Action completed: ', end='')
            if state == GoalStatus.PENDING:
                print_info('Pending')
            elif state == GoalStatus.ACTIVE:
                print_info('Active')
            elif state == GoalStatus.PREEMPTED:
                print_info('Preempted')
            elif state == GoalStatus.SUCCEEDED:
                print_info('Succeeded')
            elif state == GoalStatus.ABORTED:
                print_info('Aborted')
            elif state == GoalStatus.REJECTED:
                print_info('Rejected')
            elif state == GoalStatus.PREEMPTING:
                print_info('Preempting')
            elif state == GoalStatus.RECALLING:
                print_info('Recalling')
            elif state == GoalStatus.RECALLED:
                print_info('Recalled')
            elif state == GoalStatus.LOST:
                print_info('Lost')
            else:
                print_info('Unknown state', state)
            print_info(str(result))

        def active_cb():
            print_info("Server accepted action")

        def feedback_cb(feedback):
            if len(sys.argv) > 1 and sys.argv[1] == '--feedback':
                print_info('Feedback')
                print_info(feedback)
                print_info()

        goal = KickGoal()
        goal.header.stamp = rospy.Time.now()
        goal.header.frame_id = "base_footprint"
        goal.ball_position.x = 0.2
        goal.ball_position.y = -0.09
        goal.ball_position.z = 0
        goal.kick_direction = Quaternion(*quaternion_from_euler(0, 0, 0))
        goal.kick_speed = 1
        client = actionlib.SimpleActionClient('dynamic_kick', KickAction)
        # NOTE(review): assigning done_cb/feedback_cb/active_cb as attributes
        # does not register them with SimpleActionClient — they are normally
        # passed to send_goal(...) — confirm intended behavior.
        client.done_cb = done_cb
        client.feedback_cb = feedback_cb
        client.active_cb = active_cb
        client.send_goal(goal)
        client.wait_for_result()
    # Re-check the sticky diagnostic flags accumulated during the motions.
    if not (had_diag_stale or had_diag_warn or had_diag_error):
        print_info("Diagnostic status were okay during motions.")
    else:
        print_warn("Diagnostics report problem. Please use rqt_monitor to investigate.")
        if had_diag_error:
            print_warn(" There
<filename>pygsm/growing_string_methods/se_gsm.py from __future__ import print_function # local application imports sys.path.append(path.dirname(path.dirname(path.abspath(__file__)))) from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane from .main_gsm import MainGSM from wrappers import Molecule from utilities import nifty # standard library imports import sys import os from os import path # third party from collections import Counter import numpy as np class SE_GSM(MainGSM): def __init__( self, options, ): super(SE_GSM, self).__init__(options) self.current_nnodes = 1 print(" Assuming the isomers are initialized!") print(" Primitive Internal Coordinates") print(self.nodes[0].primitive_internal_coordinates[0:50]) print(" number of primitives is", self.nodes[0].num_primitives) print('Driving Coordinates: ') print(self.driving_coords) sys.stdout.flush() # stash bdist for node 0 ictan, self.nodes[0].bdist = self.get_tangent( self.nodes[0], None, driving_coords=self.driving_coords, ) self.nodes[0].update_coordinate_basis(constraints=ictan) def set_V0(self): self.nodes[0].V0 = self.nodes[0].energy #TODO should be actual gradient self.nodes[0].gradrms = 0. def isomer_init(self): ''' The purpose of this function is to add to the primitives the driving coordinate prims if they dont exist. This is depracated because it's better to build the topology properly before initializing GSM. 
See main.py ''' # TODO ANGLE, TORSION or OOP between fragments will not work if using TRIC with BLOCK LA changed_top = False # TODO first check if there is any add/break then rebuild topology and makePrimitives for i in self.driving_coords: if "ADD" in i or "BREAK" in i: # order if i[1] < i[2]: bond = Distance(i[1]-1, i[2]-1) else: bond = Distance(i[2]-1, i[1]-1) self.nodes[0].coord_obj.Prims.add(bond, verbose=True) changed_top = True if "ANGLE" in i: if i[1] < i[3]: angle = Angle(i[1]-1, i[2]-1, i[3]-1) else: angle = Angle(i[3]-1, i[2]-1, i[1]-1) self.nodes[0].coord_obj.Prims.add(angle, verbose=True) if "TORSION" in i: if i[1] < i[4]: torsion = Dihedral(i[1]-1, i[2]-1, i[3]-1, i[4]-1) else: torsion = Dihedral(i[4]-1, i[3]-1, i[2]-1, i[1]-1) self.nodes[0].coord_obj.Prims.add(torsion, verbose=True) if "OOP" in i: if i[1] < i[4]: oop = OutOfPlane(i[1]-1, i[2]-1, i[3]-1, i[4]-1) else: oop = OutOfPlane(i[4]-1, i[3]-1, i[2]-1, i[1]-1) self.nodes[0].coord_obj.Prims.add(oop, verbose=True) self.nodes[0].coord_obj.Prims.clearCache() if changed_top: self.nodes[0].coord_obj.Prims.rebuild_topology_from_prim_bonds(self.nodes[0].xyz) self.nodes[0].coord_obj.Prims.reorderPrimitives() self.nodes[0].update_coordinate_basis() def go_gsm(self, max_iters=50, opt_steps=10, rtype=2): """ rtype=2 Find and Climb TS, 1 Climb with no exact find, 0 turning of climbing image and TS search """ self.set_V0() if self.isRestarted is False: self.nodes[0].gradrms = 0. 
self.nodes[0].V0 = self.nodes[0].energy print(" Initial energy is %1.4f" % self.nodes[0].energy) self.add_GSM_nodeR() self.grow_string(max_iters=max_iters, max_opt_steps=opt_steps) if self.tscontinue: try: if self.pastts == 1: #normal over the hill self.add_GSM_nodeR(1) self.add_last_node(2) elif self.pastts == 2 or self.pastts==3: #when cgrad is positive self.add_last_node(1) if self.nodes[self.nR-1].gradrms > 5.*self.options['CONV_TOL']: self.add_last_node(1) elif self.pastts == 3: #product detected by bonding self.add_last_node(1) except: print("Failed to add last node, continuing.") self.nnodes = self.nR self.nodes = self.nodes[:self.nR] energies = self.energies if self.TSnode == self.nR-1: print(" The highest energy node is the last") print(" not continuing with TS optimization.") self.tscontinue = False print(" Number of nodes is ", self.nnodes) print(" Warning last node still not optimized fully") self.xyz_writer('grown_string_{:03}.xyz'.format(self.ID), self.geometries, self.energies, self.gradrmss, self.dEs) print(" SSM growth phase over") self.done_growing = True print(" beginning opt phase") print("Setting all interior nodes to active") for n in range(1, self.nnodes-1): self.active[n] = True self.active[self.nnodes-1] = False self.active[0] = False if not self.isRestarted: print(" initial ic_reparam") self.reparameterize(ic_reparam_steps=25) print(" V_profile (after reparam): ", end=' ') energies = self.energies for n in range(self.nnodes): print(" {:7.3f}".format(float(energies[n])), end=' ') print() self.xyz_writer('grown_string1_{:03}.xyz'.format(self.ID), self.geometries, self.energies, self.gradrmss, self.dEs) if self.tscontinue: self.optimize_string(max_iter=max_iters, opt_steps=3, rtype=rtype) # opt steps fixed at 3 for rtype=1 and 2, else set it to be the large number :) muah hahaahah else: print("Exiting early") self.end_early = True filename = "opt_converged_{:03d}.xyz".format(self.ID) print(" Printing string to " + filename) 
self.xyz_writer(filename, self.geometries, self.energies, self.gradrmss, self.dEs) print("Finished GSM!") def add_last_node(self, rtype): assert rtype == 1 or rtype == 2, "rtype must be 1 or 2" noptsteps = 100 if self.nodes[self.nR-1].PES.lot.do_coupling: opt_type = 'MECI' else: opt_type = 'UNCONSTRAINED' if rtype == 1: print(" copying last node, opting") self.nodes[self.nR] = Molecule.copy_from_options(self.nodes[self.nR-1], new_node_id=self.nR) print(" Optimizing node %i" % self.nR) self.optimizer[self.nR].conv_grms = self.options['CONV_TOL'] self.optimizer[self.nR].conv_gmax = self.options['CONV_gmax'] self.optimizer[self.nR].conv_Ediff = self.options['CONV_Ediff'] self.optimizer[self.nR].conv_dE = self.options['CONV_dE'] path = os.path.join(os.getcwd(), 'scratch/{:03d}/{}'.format(self.ID, self.nR)) self.optimizer[self.nR].optimize( molecule=self.nodes[self.nR], refE=self.nodes[0].V0, opt_steps=noptsteps, opt_type=opt_type, path=path, ) self.active[self.nR] = True if (self.nodes[self.nR].xyz == self.nodes[self.nR-1].xyz).all(): print(" Opt did not produce new geometry") else: self.nR += 1 elif rtype == 2: print(" already created node, opting") self.optimizer[self.nR-1].conv_grms = self.options['CONV_TOL'] self.optimizer[self.nR-1].conv_gmax = self.options['CONV_gmax'] self.optimizer[self.nR-1].conv_Ediff = self.options['CONV_Ediff'] self.optimizer[self.nR-1].conv_dE = self.options['CONV_dE'] path = os.path.join(os.getcwd(), 'scratch/{:03d}/{}'.format(self.ID, self.nR-1)) self.optimizer[self.nR-1].optimize( molecule=self.nodes[self.nR-1], refE=self.nodes[0].V0, opt_steps=noptsteps, opt_type=opt_type, path=path, ) # print(" Aligning") # self.nodes[self.nR-1].xyz = self.com_rotate_move(self.nR-2,self.nR,self.nR-1) return def grow_nodes(self): if self.nodes[self.nR-1].gradrms < self.options['ADD_NODE_TOL']: if self.nR == self.nnodes: print(" Ran out of nodes, exiting GSM") raise ValueError if self.nodes[self.nR] is None: self.add_GSM_nodeR() print(" getting energy 
def make_difference_node_list(self):
    """Build the flat (tangent, move) node-pair list for the growing string.

    Returns ``(ncurrent, nlist)`` where ``nlist`` holds ``2*nnodes`` entries:
    consecutive pairs (n, n+1) for every grown reactant-side node, terminated
    by the self-pair (nR-1, nR-1).  ``ncurrent`` is the number of pairs.
    """
    # Enumerate the pairs first, then flatten into the fixed-size list.
    pairs = [(n, n + 1) for n in range(self.nR - 1)]
    pairs.append((self.nR - 1, self.nR - 1))
    flat = [0] * (2 * self.nnodes)
    for idx, (a, b) in enumerate(pairs):
        flat[2 * idx] = a
        flat[2 * idx + 1] = b
    return len(pairs), flat
nodemax = 1 # n0 is zero until after finished growing ns = self.n0-1 if ns < nodemax: ns = nodemax print(" Energies", end=' ') energies = self.energies for n in range(ns, self.nR): print(" {:4.3f}".format(energies[n]), end=' ') if energies[n] > emax: nodemax = n emax = energies[n] print("\n nodemax ", nodemax) for n in range(nodemax, self.nR): if energies[n] < emax-THRESH1: ispast1 += 1 if energies[n] < emax-THRESH2: ispast2 += 1 if energies[n] < emax-THRESH3: ispast3 += 1 if ispast1 > 1: break print(" ispast1", ispast1) print(" ispast2", ispast2) print(" ispast3", ispast3) # TODO 5/9/2019 what about multiple constraints # Done 6/23/2019 constraints = self.nodes[self.nR-1].constraints[:, 0] gradient = self.nodes[self.nR-1].gradient overlap = np.dot(gradient.T, constraints) cgrad = overlap*constraints cgrad = np.linalg.norm(cgrad)*np.sign(overlap) print((" cgrad: %4.3f nodemax: %i nR: %i" % (cgrad, nodemax, self.nR))) # 6/17 THIS should check if the last node is high in energy if cgrad > CTHRESH and not self.nodes[self.nR-1].PES.lot.do_coupling and nodemax != self.TSnode: print(" constraint gradient positive") ispast = 2 elif ispast1 > 0 and cgrad > OTHRESH: print(" over the hill(1)") ispast = 1 elif ispast2 > 1: print(" over the hill(2)") ispast = 1 else: ispast = 0 if ispast == 0: bch = self.check_for_reaction_g(1, self.driving_coords) if ispast3 > 1 and bch: print("over the hill(3) connection changed %r " % bch) ispast = 3 print(" ispast=", ispast) return ispast def check_if_grown(self): ''' Check if the string is grown Returns True if grown ''' self.pastts = self.past_ts() isDone = False # TODO break planes condition1 = (abs(self.nodes[self.nR-1].bdist) <= (1-self.BDIST_RATIO)*abs(self.nodes[0].bdist)) print(" bdist %.3f" % self.nodes[self.nR-1].bdist) fp = self.find_peaks('growing') if self.pastts and self.current_nnodes > 3 and condition1: # TODO extra criterion here print(" pastts is ", self.pastts) if self.TSnode == self.nR-1: print(" The highest energy node 
is the last") print(" not continuing with TS optimization.") self.tscontinue = False nifty.printcool("Over the hill") isDone = True elif fp == -1 and self.energies[self.nR-1] > 200. and self.nodes[self.nR-1].gradrms > self.options['CONV_TOL']*5: print("growth_iters over: all uphill and high energy") self.end_early = 2 self.tscontinue = False self.nnodes = self.nR isDone = True elif fp == -2: print("growth_iters over: all uphill and flattening out") self.end_early = 2 self.tscontinue = False self.nnodes = self.nR isDone
def setUp(self):
    """Install a permissive policy mock and build the controller under test."""
    super(ShareServerControllerTest, self).setUp()
    policy_mock = mock.Mock(return_value=True)
    self.mock_policy_check = self.mock_object(policy, 'check_policy',
                                              policy_mock)
    controller = share_servers.ShareServerController()
    self.controller = controller
    self.resource_name = controller.resource_name
self.controller.share_server_reset_status( req, 'fake_server_id', body) self.assertEqual(202, result.status_int) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') mock_update.assert_called_once_with( context, 'fake_server_id', {'status': status}) def test_share_reset_server_status_invalid(self): req = fakes.HTTPRequest.blank('/reset_status', use_admin_context=True, version="2.49") body = {'reset_status': {'status': constants.STATUS_EXTENDING}} context = req.environ['manila.context'] self.assertRaises( webob.exc.HTTPBadRequest, self.controller.share_server_reset_status, req, id='fake_server_id', body=body) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') def test_share_server_reset_status_no_body(self): req = fakes.HTTPRequest.blank('/reset_status', use_admin_context=True, version="2.49") context = req.environ['manila.context'] self.assertRaises( webob.exc.HTTPBadRequest, self.controller.share_server_reset_status, req, id='fake_server_id', body={}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') def test_share_server_reset_status_no_status(self): req = fakes.HTTPRequest.blank('/reset_status', use_admin_context=True, version="2.49") context = req.environ['manila.context'] self.assertRaises( webob.exc.HTTPBadRequest, self.controller.share_server_reset_status, req, id='fake_server_id', body={'reset_status': {}}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') def _setup_manage_test_request_body(self): body = { 'share_network_id': 'fake_net_id', 'share_network_subnet_id': 'fake_subnet_id', 'host': 'fake_host', 'identifier': 'fake_identifier', 'driver_options': {'opt1': 'fake_opt1', 'opt2': 'fake_opt2'}, } return body @ddt.data('fake_net_name', '') def test_manage(self, share_net_name): """Tests share server manage""" req = fakes.HTTPRequest.blank('/v2/share-servers/', use_admin_context=True, version="2.49") 
context = req.environ['manila.context'] share_network = db_utils.create_share_network(name=share_net_name) share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_network['id']) share_server = db_utils.create_share_server( share_network_subnet_id=share_net_subnet['id'], host='fake_host', identifier='fake_identifier', is_auto_deletable=False) self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_network_subnet_get_default_subnet', mock.Mock(return_value=share_net_subnet)) self.mock_object(utils, 'validate_service_host') body = { 'share_server': self._setup_manage_test_request_body() } manage_share_server_mock = self.mock_object( share_api.API, 'manage_share_server', mock.Mock(return_value=share_server)) result = self.controller.manage(req, body) expected_result = { 'share_server': { 'id': share_server['id'], 'project_id': 'fake', 'updated_at': None, 'status': constants.STATUS_ACTIVE, 'host': 'fake_host', 'share_network_id': share_server['share_network_subnet']['share_network_id'], 'created_at': share_server['created_at'], 'backend_details': {}, 'identifier': share_server['identifier'], 'is_auto_deletable': share_server['is_auto_deletable'], } } if share_net_name != '': expected_result['share_server']['share_network_name'] = ( 'fake_net_name') else: expected_result['share_server']['share_network_name'] = ( share_net_subnet['share_network_id']) req_params = body['share_server'] manage_share_server_mock.assert_called_once_with( context, req_params['identifier'], req_params['host'], share_net_subnet, req_params['driver_options']) self.assertEqual(expected_result, result) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'manage_share_server') def test_manage_invalid(self): req = fakes.HTTPRequest.blank('/manage_share_server', use_admin_context=True, version="2.49") context = req.environ['manila.context'] share_network = db_utils.create_share_network() 
def test__validate_manage_share_server_validate_no_body(self):
    """Manage with an empty request body must raise 422 Unprocessable."""
    empty_body = {}
    request = fakes.HTTPRequest.blank('/manage', version="2.49")
    self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                      self.controller.manage, request, empty_body)
@ddt.data({'empty': False, 'key': 'host'},
          {'empty': False, 'key': 'share_network_id'},
          {'empty': False, 'key': 'identifier'},
          {'empty': True, 'key': 'host'},
          {'empty': True, 'key': 'share_network_id'},
          {'empty': True, 'key': 'identifier'})
@ddt.unpack
def test__validate_manage_share_server_validate_without_parameters(
        self, empty, key):
    """Manage must 400 when a mandatory field is missing or set to None."""
    request = fakes.HTTPRequest.blank('/manage_share_server', version="2.49")
    self.mock_object(share_api.API, 'manage_share_server', mock.Mock())
    body = {
        'share_server': self._setup_manage_test_request_body(),
    }
    # Either blank the field out (empty=True) or drop it entirely.
    if empty:
        body['share_server'][key] = None
    else:
        body['share_server'].pop(key)
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.manage, request, body)
def test__validate_manage_share_server_error_extract_host(self):
    """Manage must reject a host value that includes a pool component."""
    request = fakes.HTTPRequest.blank('/manage', version="2.49")
    ctxt = request.environ['manila.context']
    manage_body = self._setup_manage_test_request_body()
    manage_body['host'] = 'fake@backend#pool'
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.manage,
                      request, {'share_server': manage_body})
    policy.check_policy.assert_called_once_with(
        ctxt, self.resource_name, 'manage_share_server')
@ddt.data(True, False)
def test_unmanage(self, force):
    """Successful unmanage returns 202 and forwards the force flag."""
    server = self._setup_unmanage_tests()
    request = fakes.HTTPRequest.blank('/unmanage', version="2.49")
    ctxt = request.environ['manila.context']
    get_mock = self.mock_object(
        db_api, 'share_server_get', mock.Mock(return_value=server))
    unmanage_mock = self.mock_object(
        share_api.API, 'unmanage_share_server',
        mock.Mock(return_value=202))
    request_body = {'unmanage': {'force': force}}
    response = self.controller.unmanage(request, server['id'], request_body)
    self.assertEqual(202, response.status_int)
    get_mock.assert_called_once_with(ctxt, server['id'])
    unmanage_mock.assert_called_once_with(ctxt, server, force=force)
def _get_server_migration_request(self, server_id):
    """Build an experimental admin POST request for server-migration actions."""
    url = '/share-servers/%s/action' % server_id
    request = fakes.HTTPRequest.blank(
        url, use_admin_context=True, version='2.57')
    request.headers['content-type'] = 'application/json'
    request.method = 'POST'
    request.api_version_request.experimental = True
    return request
req.environ['manila.context'] self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(share_api.API, 'share_server_migration_start') body = { 'migration_start': { 'host': 'fake_host', 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_network_id': 'fake_net_id', } } self.controller.share_server_migration_start(req, server['id'], body) db_api.share_server_get.assert_called_once_with( context, server['id']) share_api.API.share_server_migration_start.assert_called_once_with( context, server, 'fake_host', True, True, True, new_share_network=share_network) db_api.share_network_get.assert_called_once_with( context, 'fake_net_id') @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), 'expected_exception': webob.exc.HTTPBadRequest}, {'api_exception': exception.InvalidShareServer(reason=""), 'expected_exception': webob.exc.HTTPConflict}) @ddt.unpack def test_share_server_migration_start_conflict(self, api_exception, expected_exception): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = { 'migration_start': { 'host': 'fake_host', 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, } } self.mock_object(share_api.API, 'share_server_migration_start', mock.Mock(side_effect=api_exception)) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.assertRaises(expected_exception, self.controller.share_server_migration_start, req, server['id'], body) db_api.share_server_get.assert_called_once_with(context, server['id']) migration_start_params = body['migration_start'] share_api.API.share_server_migration_start.assert_called_once_with( context, server, migration_start_params['host'], migration_start_params['writable'], 
@ddt.data('host', 'body')
def test_share_server_migration_start_missing_mandatory(self, param):
    """Migration start must 400 when the body or the host field is missing."""
    server = db_utils.create_share_server(
        id='fake_server_id', status=constants.STATUS_ACTIVE)
    request = self._get_server_migration_request(server['id'])
    ctxt = request.environ['manila.context']
    migration_params = {
        'host': 'fake_host',
        'preserve_metadata': True,
        'preserve_snapshots': True,
        'writable': True,
        'nondisruptive': True,
    }
    body = {'migration_start': migration_params}
    # Remove either the whole action body or just the named parameter.
    if param == 'body':
        body.pop('migration_start')
    else:
        migration_params.pop(param)
    method = 'share_server_migration_start'
    self.mock_object(share_api.API, method)
    self.mock_object(db_api, 'share_server_get',
                     mock.Mock(return_value=server))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      getattr(self.controller, method),
                      request, server['id'], body)
    db_api.share_server_get.assert_called_once_with(ctxt, server['id'])
def test_share_server_migration_start_new_share_network_not_found(self):
    """A nonexistent new_share_network_id must produce a 400 response."""
    server = db_utils.create_share_server(
        id='fake_server_id', status=constants.STATUS_ACTIVE)
    request = self._get_server_migration_request(server['id'])
    ctxt = request.environ['manila.context']
    migration_params = {
        'host': 'fake_host',
        'preserve_metadata': True,
        'preserve_snapshots': True,
        'writable': True,
        'nondisruptive': True,
        'new_share_network_id': 'nonexistent',
    }
    body = {'migration_start': migration_params}
    self.mock_object(db_api, 'share_network_get',
                     mock.Mock(side_effect=exception.NotFound()))
    self.mock_object(db_api, 'share_server_get',
                     mock.Mock(return_value=server))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.share_server_migration_start,
                      request, server['id'], body)
    db_api.share_network_get.assert_called_once_with(ctxt, 'nonexistent')
    db_api.share_server_get.assert_called_once_with(ctxt, server['id'])
def bert_session_patterns(args):
    """Build the PopART pattern set, disabling TiedGather where it is unsafe."""
    session_patterns = popart.Patterns()
    if args.disable_attention_dropout_bwd:
        session_patterns.enablePattern("DisableAttnDropoutBwdPattern", True)

    # TiedGather is incompatible with pretraining that has no gradient
    # accumulation, and with SGD keeping optimizer state off-chip.
    pretraining_without_accum = (args.task == "PRETRAINING"
                                 and args.gradient_accumulation_factor <= 1
                                 and not args.inference)
    if pretraining_without_accum:
        session_patterns.enablePattern("TiedGatherPattern", False)
        logger.warning("Running Pretraining without Gradient Accumulation will disable optimisations "
                       "for the Word Embedding weight. This will increase memory usage. "
                       "Consider enabling Gradient Accumulation.")

    sgd_state_offchip = args.optimizer == "SGD" and args.optimizer_state_offchip
    if sgd_state_offchip:
        session_patterns.enablePattern("TiedGatherPattern", False)
        logger.warning("Remote Optimizer State with SGD/SGD+M is not a recommended configuration")
    return session_patterns
" "Please install the horovod .whl provided in the Poplar SDK.") session = hvd.DistributedTrainingSession(**kwargs) logger.info("Compiling Training Graph") compile_graph_checked(args, session) logger.info("Broadcasting weights to all instances") hvd.broadcast_weights(session) return session def bert_training_session(model, args, feed, loss, device, optimizer_factory): options = bert_session_options(args, model) patterns = bert_session_patterns(args) proto = model.builder.getModelProto() optimizer = optimizer_factory.create() logger.info("Creating Session") session_kwargs = dict(fnModel=proto, loss=loss, deviceInfo=device, optimizer=optimizer, dataFlow=feed, patterns=patterns, userOptions=options) if args.use_popdist: session = bert_distributed_training_session(args, **session_kwargs) else: session = popart.TrainingSession(**session_kwargs) logger.info("Compiling Training Graph") compile_graph_checked(args, session) session.weightsFromHost() session.setRandomSeed(args.seed) anchors = session.initAnchorArrays() return session, anchors def bert_inference_session(model, args, feed, device): options = bert_session_options(args, model) patterns = bert_session_patterns(args) proto = model.builder.getModelProto() logger.info("Creating Session") session = popart.InferenceSession(fnModel=proto, deviceInfo=device, dataFlow=feed, patterns=patterns, userOptions=options) logger.info("Compiling Inference Graph") compile_graph_checked(args, session) session.weightsFromHost() session.setRandomSeed(args.seed) anchors = session.initAnchorArrays() return session, anchors def bert_writer(args): writer = None if args.log_dir is not None and popdist_root(args): log_name = f"{os.path.basename(args.checkpoint_dir)}."\ f"{datetime.datetime.now().isoformat()}" log_dir = os.path.join( args.log_dir, log_name) writer = SummaryWriter(log_dir=log_dir) return writer def get_bert_dataset(model, args, inputs): shapeOf = model.builder.getTensorShape # The inputs after the first three (ind, pos, seg) 
def save_model(args, session, step, epoch=None, step_in_filename=False):
    # Persist the ONNX model from the device.  Only the popdist root instance
    # writes, and only when model saving is enabled.
    if not args.no_model_save and popdist_root(args):
        save_file = "model"
        if epoch is not None:
            save_file += f"_{epoch}"
        if step_in_filename:
            save_file += f":{step}"
        # When initializers are saved externally, each checkpoint gets its own
        # directory so the external tensor file can live next to the .onnx.
        if args.save_initializers_externally:
            save_dir = Path(args.checkpoint_dir, save_file)
            save_dir.mkdir(parents=True, exist_ok=True)
        else:
            save_dir = args.checkpoint_dir
        save_file += '.onnx'
        save_path = os.path.join(save_dir, save_file)
        # e.g. ".../model_3.onnx" -> ".../vars_3.onnx" (external tensor data).
        save_vars = 'vars'.join(save_path.rsplit('model', 1))
        if args.save_initializers_externally:
            # NOTE(review): nesting was ambiguous in the original paste; the
            # retargeting is assumed to run only when a previous external
            # location exists -- confirm against upstream bert.py.
            if hasattr(args, 'save_vars_prev') and os.path.exists(args.save_vars_prev):
                logger.debug(f'Updating external location for vars to {args.save_vars_prev}.')
                session.updateExternallySavedTensorLocations(args.save_vars_prev, save_vars)
        session.modelToHost(save_path)
        # Remember where this checkpoint's external vars went for next time.
        args.save_vars_prev = save_vars
        logger.info(f"Saved model to: {save_path}.")
        if args.save_initializers_externally:
            logger.info(f"Saved variables(weights and optimizer state) to: {save_vars}.")
def compute_latency(args, start_times, end_times, durations):
    """Compute latency stats from stepio callbacks when available.

    Falls back to the per-step durations when low-latency mode is off or no
    callback timestamps were recorded.
    """
    have_callback_times = bool(start_times) and bool(end_times)
    if args.low_latency_inference and have_callback_times:
        return compute_latency_from_callbacks(start_times, end_times,
                                              args.batches_per_step)
    if args.low_latency_inference:
        # Low-latency mode was requested but no callback samples exist.
        logger.warning("No stepio callback times recorded. Using durations for fallback calculation.")
    return compute_latency_from_durations(durations)
accuracies, iteration, optimizer_factory) if args.steps_per_save > 0 and (iteration.count % args.steps_per_save) == 0: save_model(args, session, iteration.count, iteration.epoch, True) if args.training_steps and iteration.count >= args.training_steps: logger.info(f"Ending Training at {iteration.count} Steps") return if args.epochs_per_save > 0 and ((iteration.epoch + 1) % iteration.epochs_per_save) == 0: save_model(args, session, iteration.count, iteration.epoch + 1) def bert_infer_loop(args, session, dataset, inputs, logits, anchors, accuracies, losses, iteration: Iteration): save_results = args.task == "SQUAD" and not (args.synthetic_data or args.generated_data) micro_batches = args.batches_per_step * args.replication_factor # Create the stepio once outside of the inference loop: static_data = {} start_times = defaultdict(list) end_times = defaultdict(list) stepio = None if args.low_latency_inference and args.task == "SQUAD": stepio = create_callback_stepio(static_data, anchors, start_times, end_times, dataset.batches_per_step, args.replication_factor) with realtime_scheduling(args.realtime_scheduler): for iteration.epoch in range(args.epochs_inference): for data in dataset: static_data.update({t: data[t].reshape(micro_batches, -1) for t in inputs}) result = bert_process_infer_data(args, session, static_data, anchors, logits, iteration, start_times, end_times, stepio, accuracies, losses) if result is not None and save_results and iteration.epoch == args.epochs_inference - 1: dataset.add_results(data, result) start_times.clear() end_times.clear() # If SQuAD run the evaluate-v1.1.py script and save the predictions if save_results: results = dataset.write_predictions() if args.wandb and results is not None: for k, v in results.items(): wandb.run.summary[k] = v def bert_required_ipus(args, model): return model.total_ipus * args.replication_factor def bert_pretrained_initialisers(config, args): if args.synthetic_data: logger.info("Initialising from synthetic_data") 
# (fragment: continuation of bert_pretrained_initialisers from the previous chunk)
        return None

    if args.generated_data:
        logger.info("Initialising from generated_data")
        return None

    # The initialised weights will be broadcast after the session has been created
    if not popdist_root(args):
        return None

    init = None
    if args.onnx_checkpoint:
        logger.info(f"Initialising from ONNX checkpoint: {args.onnx_checkpoint}")
        init = load_initializers_from_onnx(args.onnx_checkpoint)

    # NOTE(review): a TF checkpoint silently overrides an ONNX checkpoint when
    # both are supplied — confirm that precedence is intended.
    if args.tf_checkpoint:
        logger.info(f"Initialising from TF checkpoint: {args.tf_checkpoint}")
        init = load_initializers_from_tf(args.tf_checkpoint, True, config, args.task)

    return init


def bert_optimizer_factory(args, model, iteration):
    """Select the optimizer factory implied by the LR schedule type."""
    if args.learning_rate_function == "Linear":
        return LinearOptimizerFactory(args, iteration, model.tensors)
    else:
        return ScheduledOptimizerFactory(args, iteration, model.tensors)


def bert_iteration(args, dataset, writer):
    """Build the iteration/progress tracker appropriate for the task."""
    if args.task == "PRETRAINING":
        return PretrainingIteration(
            args,
            steps_per_epoch=len(dataset),
            writer=writer,
            recording_steps=args.aggregate_metrics_over_steps)
    else:
        return Iteration(
            args,
            steps_per_epoch=len(dataset),
            writer=writer,
            recording_steps=args.aggregate_metrics_over_steps)


def main(args):
    """Entry point: build model/dataset/session then train or infer."""
    set_library_seeds(args.seed)

    config = bert_config_from_args(args)

    initializers = bert_pretrained_initialisers(config, args)

    logger.info("Building Model")

    model = Bert(config,
                 pipeline=args.pipeline,
                 initializers=initializers)

    if not config.use_packed_sequence_format:
        # If config.host_embedding is enabled, indices and positions will have the matrices instead of the index vector.
        indices, positions, segments, masks, labels = bert_add_inputs(args, model)
        logits = model.build_graph(indices, positions, segments, masks)
        outputs, accuracies, losses, final_loss, writer = bert_add_outputs(args, model, logits, labels)
        dataset = get_bert_dataset(model, args, [indices, positions, segments, masks, labels])

    else:  # use_packed_sequence_format
        if args.task != "PRETRAINING":
            raise RuntimeError("Packed sequence format currently only supported for pretraining.")
        input_tensor_shapes = packed_bert_utils.add_inputs(model)
        logits = packed_bert_utils.logits_graph(model)
        losses, accuracies, final_loss, outputs = packed_bert_utils.pretraining_loss_and_accuracy(model, logits)
        writer = bert_writer(args) if not args.inference else None
        dataset = get_pretraining_dataset(args, input_tensor_shapes)

    device = acquire_device(args, bert_required_ipus(args, model))

    logger.info(f"Dataset length: {len(dataset)}")

    data_flow = popart.DataFlow(args.batches_per_step, outputs)

    iteration = bert_iteration(args, dataset, writer)

    if args.inference:
        session, anchors = bert_inference_session(model, args, data_flow, device)
        logger.info("Inference Started")
        # NOTE(review): in the packed-sequence branch `indices`/`positions`/...
        # are never bound, so inference with packed format would raise
        # NameError here — confirm packed format is training-only.
        inputs = [indices, positions, segments, *masks, *labels]
        bert_infer_loop(args, session,
                        dataset, inputs, logits, anchors,
                        accuracies, losses, iteration)
        device.detach()
    else:
        if not args.no_training:
            optimizer_factory = bert_optimizer_factory(args, model, iteration)
            if args.save_initializers_externally:
                save_dir = Path(args.checkpoint_dir, f'model_{args.continue_training_from_epoch}')
                save_dir.mkdir(parents=True, exist_ok=True)
                weight_tensors = [item for sublist in model.tensors.values() for item in sublist]
                vars_path = f'vars_{args.continue_training_from_epoch}.onnx'
                vars_path = os.path.join(save_dir, vars_path)
                model.builder.saveInitializersExternally(weight_tensors, vars_path)
            session, anchors = bert_training_session(model, args, data_flow,
                                                     final_loss, device, optimizer_factory)
            logger.info("Training Started")
            bert_train_loop(args, session, writer, dataset,
                            accuracies, losses, anchors,
                            iteration, optimizer_factory)

            save_model(args, session, iteration.count)
            if args.wandb_save_checkpoints:
                artifact = wandb.Artifact(name=args.wandb_save_checkpoints, type="model")
                artifact.add_dir(args.checkpoint_dir)
                wandb.log_artifact(artifact)

            device.detach()
            logger.info("Training Finished")

    # NOTE(review): if neither inference nor training ran (--no-training),
    # `session` is unbound and this return raises UnboundLocalError — confirm.
    return session, iteration


def setup_logger(log_level, handler=None):
    # (continues past this chunk boundary)
    # Define a root config with a format which is simpler for console use
    root = logging.getLogger()
    root.setLevel(log_level)
    root_handler = logging.StreamHandler(sys.stdout)
    root_formatter = logging.Formatter(
        '%(asctime)s %(name)s %(levelname)s %(message)s',
        '%Y-%m-%d %H:%M:%S')
    root_handler.setFormatter(root_formatter)
    root.handlers = [root_handler]
    if handler is not None:
        root.handlers += [handler]

    # Define a specific Handler for this file that removes the root name.
    console = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
<filename>modules/general.py from discord.ext import commands import discord import aiohttp from bs4 import BeautifulSoup import base64 import config import random from PIL import Image from io import BytesIO import datetime import qrcode from urllib.parse import quote_plus import rethinkdb as r from math import sqrt from .utils import helpers, instance_tools, chat_formatting, paginator class General(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() @commands.guild_only() @commands.cooldown(2, 5, commands.BucketType.user) async def anime(self, ctx, *, search: str): """Get Anime Stats""" await ctx.trigger_typing() async with aiohttp.ClientSession() as cs: async with cs.post("https://graphql.anilist.co", json={ "query": helpers.anilist_query, "variables": { "search": search } }) as res: data = await res.json() if data.get("errors", []): return await ctx.send("Error getting data from anilist: {}".format(data["errors"][0]["message"])) media = data["data"]["Page"]["media"] if not media: return await ctx.send("Nothing found.") media = media[0] if media["isAdult"] is True and not ctx.channel.is_nsfw(): return await ctx.send("NSFW Anime can't be displayed in non NSFW channels.") color = int(media["coverImage"]["color"].replace("#", ""), 16) if media["coverImage"]["color"] else 0xdeadbf em = discord.Embed(colour=color) em.title = "{} ({})".format(media["title"]["romaji"], media["title"]["english"]) if media["description"]: desc = BeautifulSoup(media["description"], "lxml") if desc: em.description = desc.text em.url = "https://anilist.co/anime/{}".format(media["id"]) em.set_thumbnail(url=media["coverImage"]["extraLarge"]) em.add_field(name="Status", value=media["status"].title(), inline=True) em.add_field(name="Episodes", value=media["episodes"], inline=True) em.add_field(name="Score", value=str(media["averageScore"]), inline=True) em.add_field(name="Genres", value=", ".join(media["genres"])) dates = "{}/{}/{}".format(media["startDate"]["day"], 
media["startDate"]["month"], media["startDate"]["year"]) if media["endDate"]["year"] is not None: dates += " - {}/{}/{}".format(media["endDate"]["day"], media["endDate"]["month"], media["endDate"]["year"]) em.add_field(name="Date", value=dates) await ctx.send(embed=em) def whatanime_embedbuilder(self, doc: dict): em = discord.Embed(color=0xDEADBF) em.title = doc["title_romaji"] em.url = "https://myanimelist.net/anime/{}".format(doc["mal_id"]) em.add_field(name="Episode", value=str(doc["episode"])) em.add_field(name="At", value=str(doc["at"])) em.add_field(name="Matching %", value=str(round(doc["similarity"] * 100, 2))) em.add_field(name="Native Title", value=doc["title_native"]) em.set_footer(text="Powered by trace.moe") return em def whatanime_prefbuilder(self, doc): preview = f"https://trace.moe/thumbnail.php?anilist_id={doc['anilist_id']}" \ f"&file={doc['filename']}" \ f"&t={doc['at']}" \ f"&token={doc['tokenthumb']}" return preview @commands.command() @commands.cooldown(2, 25, commands.BucketType.user) @commands.guild_only() async def whatanime(self, ctx): """Check what the anime is from an image.""" if not len(ctx.message.attachments) == 0: img = ctx.message.attachments[0].url else: def check(m): return m.channel == ctx.message.channel and m.author == ctx.message.author try: await ctx.send("Send me an image of an anime to search for.") x = await self.bot.wait_for('message', check=check, timeout=15) except: return await ctx.send("Timed out.") if not len(x.attachments) >= 1: return await ctx.send("You didn't give me a image.") img = x.attachments[0].url async with aiohttp.ClientSession() as cs: async with cs.get(img) as r: res = await r.read() with Image.open(BytesIO(res)) as img: img.seek(0) img.convert("RGB") img.thumbnail((512, 288), Image.ANTIALIAS) i = BytesIO() img.save(i, format="PNG") i.seek(0) await ctx.trigger_typing() async with aiohttp.ClientSession() as cs: async with cs.post("https://trace.moe/api/search?token={}".format(config.whatanime), 
data={"image": str(base64.b64encode(i.read()).decode("utf8"))}, headers={"Content-Type": "application/x-www-form-urlencoded"}) as r: if r.status == 503: return await ctx.send("Service down for maintenance") try: res = await r.json() except: return await ctx.send("File too large.") if not res["docs"]: return await ctx.send("Nothing found.") doc = res["docs"][0] if doc["is_adult"] and not ctx.channel.is_nsfw: return await ctx.send("Can't send NSFW in a non NSFW channel.") em = self.whatanime_embedbuilder(doc) try: async with aiohttp.ClientSession() as cs: async with cs.get(self.whatanime_prefbuilder(doc)) as r: imres = await r.read() file = discord.File(imres, filename="file.gif") em.set_image(url="attachment://file.gif") except: file = None em.set_image(url="https://nekobot.xyz/placeholder.png") await ctx.send(embed=em, file=file) @commands.command() async def cookie(self, ctx, user: discord.Member): """Give somebody a cookie :3""" await ctx.send("<:NekoCookie:408672929379909632> - **{} gave {} a cookie OwO** -" " <:NekoCookie:408672929379909632>".format(ctx.message.author.name, user.mention)) @commands.command() @commands.cooldown(1, 3, commands.BucketType.user) async def choose(self, ctx, *items): """Choose between multiple options""" if not items: return await ctx.send_help(ctx.command) await ctx.send("I chose: **{}**!".format(helpers.clean_text(random.choice(items)))) def get_bot_uptime(self, *, brief=False): now = datetime.datetime.utcnow() delta = now - self.bot.uptime hours, remainder = divmod(int(delta.total_seconds()), 3600) minutes, seconds = divmod(remainder, 60) days, hours = divmod(hours, 24) if not brief: if days: fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds' else: fmt = '{h} hours, {m} minutes, and {s} seconds' else: fmt = '{h}h {m}m {s}s' if days: fmt = '{d}d ' + fmt return fmt.format(d=days, h=hours, m=minutes, s=seconds) @commands.command() @commands.cooldown(1, 5, commands.BucketType.user) async def info(self, ctx): """Get Bot's 
Info"""
        await ctx.trigger_typing()
        # Aggregate counts across all bot instances via redis.
        i = instance_tools.InstanceTools(self.bot.instances, self.bot.redis)
        servers = await i.get_all_guilds()
        members = await i.get_all_users()
        channels = await i.get_all_channels()
        if isinstance(ctx.channel, discord.TextChannel):
            thisShard = ctx.guild.shard_id
        else:
            thisShard = 0
        # len(self.bot.lavalink.players.find_all(lambda p: p.is_playing))
        info = discord.Embed(color=0xDEADBF, title="**Info**")
        info.description = "Servers: **{} ({})**\nMembers: **{}**\nBot Commands: **{}**\nChannels: **{}**\nShards: **{}**\nThis Shard: **{}**\nBot in voice channel(s): **{}**\nUptime: **{}**\n".format(
            helpers.millify(servers),
            servers,
            helpers.millify(members),
            str(len(self.bot.commands)),
            helpers.millify(channels),
            self.bot.shard_count,
            thisShard,
            0,
            self.get_bot_uptime()
        )
        info.add_field(name="Links", value="[GitHub](https://github.com/hibikidesu/NekoBotRewrite/) | "
                                           "[Support Server](https://discord.gg/q98qeYN) | "
                                           "[Patreon](https://www.patreon.com/NekoBot)")
        info.set_thumbnail(url=self.bot.user.avatar_url_as(format="png"))
        await ctx.send(embed=info)

    @commands.command(aliases=["user"])
    @commands.guild_only()
    @commands.cooldown(1, 4, commands.BucketType.user)
    async def userinfo(self, ctx, user: discord.Member = None):
        """Get a users info."""
        if not user:
            user = ctx.message.author
        # NOTE(review): Activity objects expose .name, not .title — this
        # try/except likely always falls back to None; verify.
        try:
            playinggame = user.activity.title
        except:
            playinggame = None
        server = ctx.message.guild
        embed = discord.Embed(color=0xDEADBF)
        embed.set_author(name=user.name, icon_url=user.avatar_url)
        embed.add_field(name="ID", value=user.id)
        embed.add_field(name="Discriminator", value=user.discriminator)
        embed.add_field(name="Bot", value=str(user.bot))
        embed.add_field(name="Created", value=user.created_at.strftime("%d %b %Y %H:%M"))
        embed.add_field(name="Joined", value=user.joined_at.strftime("%d %b %Y %H:%M"))
        embed.add_field(name="Animated Avatar", value=str(user.is_avatar_animated()))
        embed.add_field(name="Playing", value=playinggame)
        embed.add_field(name="Status", value=user.status)
        embed.add_field(name="Color", value=str(user.color))
        try:
            roles = [x.name for x in user.roles if x.name != "@everyone"]
            if roles:
                # Sort the user's roles by their position in the guild hierarchy.
                roles = sorted(roles, key=[x.name for x in server.role_hierarchy if x.name != "@everyone"].index)
                roles = ", ".join(roles)
            else:
                roles = "None"
            embed.add_field(name="Roles", value=roles)
        except:
            pass
        await ctx.send(embed=embed)

    @commands.command(aliases=["server"])
    @commands.guild_only()
    @commands.cooldown(1, 4, commands.BucketType.user)
    async def serverinfo(self, ctx):
        """Display Server Info"""
        server = ctx.guild
        verif = server.verification_level
        # Counts only cached members currently online or idle.
        online = len([m.status for m in server.members
                      if m.status == discord.Status.online or m.status == discord.Status.idle])
        embed = discord.Embed(color=0xDEADBF)
        embed.add_field(name="Name", value=f"**{server.name}**\n({server.id})")
        embed.add_field(name="Owner", value=server.owner)
        embed.add_field(name="Online (Cached)", value=f"**{online}/{server.member_count}**")
        embed.add_field(name="Created at", value=server.created_at.strftime("%d %b %Y %H:%M"))
        embed.add_field(name="Channels", value=f"Text Channels: **{len(server.text_channels)}**\n"
                                               f"Voice Channels: **{len(server.voice_channels)}**\n"
                                               f"Categories: **{len(server.categories)}**\n"
                                               f"AFK Channel: **{server.afk_channel}**")
        embed.add_field(name="Roles", value=str(len(server.roles)))
        embed.add_field(name="Emojis", value=f"{len(server.emojis)}/100")
        embed.add_field(name="Region", value=str(server.region).title())
        embed.add_field(name="Security", value=f"Verification Level: **{verif}**\n"
                                               f"Content Filter: **{server.explicit_content_filter}**")
        try:
            embed.set_thumbnail(url=server.icon_url)
        except:
            pass
        await ctx.send(embed=embed)

    @commands.command(aliases=["channel"])
    @commands.guild_only()
    @commands.cooldown(1, 3, commands.BucketType.user)
    async def channelinfo(self, ctx, channel: discord.TextChannel = None):
        """Get Channel Info"""
        if channel is None:
            channel = ctx.message.channel
        embed = discord.Embed(color=0xDEADBF, description=channel.mention)
        embed.add_field(name="Name", value=channel.name)
        embed.add_field(name="Server", value=channel.guild)
        embed.add_field(name="ID", value=channel.id)
        embed.add_field(name="Category ID", value=channel.category_id)
        embed.add_field(name="Position", value=channel.position)
        embed.add_field(name="NSFW", value=str(channel.is_nsfw()))
        embed.add_field(name="Members (cached)", value=str(len(channel.members)))
        embed.add_field(name="Category", value=channel.category)
        embed.add_field(name="Created", value=channel.created_at.strftime("%d %b %Y %H:%M"))
        await ctx.send(embed=embed)

    @commands.command()
    @commands.guild_only()
    async def urban(self, ctx, *, search_terms: str, definition_number: int = 1):
        """Search Urban Dictionary"""
        # NOTE(review): definition_number can never be supplied (search_terms
        # is keyword-only consume-rest); the definition index is instead parsed
        # from a trailing number in search_terms below.
        if not ctx.channel.is_nsfw():
            return await ctx.send("Please use this in an NSFW channel.", delete_after=5)

        def encode(s):
            return quote_plus(s, encoding="utf-8", errors="replace")
        search_terms = search_terms.split(" ")
        try:
            # A trailing integer selects which definition to show (1-based).
            if len(search_terms) > 1:
                pos = int(search_terms[-1]) - 1
                search_terms = search_terms[:-1]
            else:
                pos = 0
            if pos not in range(0, 11):
                pos = 0
        except ValueError:
            pos = 0
        search_terms = "+".join([encode(s) for s in search_terms])
        url = "http://api.urbandictionary.com/v0/define?term=" + search_terms
        try:
            async with aiohttp.ClientSession() as cs:
                async with cs.get(url) as r:
                    result = await r.json()
            if result["list"]:
                definition = result['list'][pos]['definition']
                example = result['list'][pos]['example']
                defs = len(result['list'])
                msg = ("**Definition #{} out of {}:\n**{}\n\n"
                       "**Example:\n**{}".format(pos + 1, defs, definition, example))
                msg = chat_formatting.pagify(msg, ["\n"])
                for page in msg:
                    await ctx.send(page)
            else:
                await ctx.send("Your search terms gave no results.")
        except IndexError:
            await ctx.send("There is no definition #{}".format(pos + 1))

    @commands.command()
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def avatar(self, ctx, user: discord.Member = None, format_type: str = None):
        """Get a
user's avatar"""
        await ctx.channel.trigger_typing()
        if user is None:
            user = ctx.message.author
        try:
            color = await helpers.get_dominant_color(self.bot, user.avatar_url_as(format="png"))
        except:
            color = 0xDEADBF
        em = discord.Embed(color=color, title="{}'s Avatar".format(user.name))
        # Fall back to the default URL for unknown/missing format requests.
        if format_type is None or format_type not in ["png", "jpg", "gif", "jpeg", "webp"]:
            await ctx.send(embed=em.set_image(url=user.avatar_url))
        else:
            await ctx.send(embed=em.set_image(url=user.avatar_url_as(format=format_type)))

    @commands.command()
    @commands.cooldown(1, 4, commands.BucketType.user)
    async def coffee(self, ctx):
        """Coffee owo"""
        await ctx.channel.trigger_typing()
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://nekobot.xyz/api/image?type=coffee") as res:
                imgdata = await res.json()
        # Send immediately, then recolour the embed once the dominant colour
        # has been computed (avoids blocking the first response).
        em = discord.Embed()
        msg = await ctx.send("*drinks coffee*", embed=em.set_image(url=imgdata["message"]))
        color = await helpers.get_dominant_color(self.bot, imgdata["message"])
        em = discord.Embed(color=color)
        await msg.edit(embed=em.set_image(url=imgdata["message"]))

    @commands.command()
    @commands.cooldown(1, 3, commands.BucketType.user)
    async def animepic(self, ctx):
        # Fetch and post a random anime picture from the nekobot API.
        await ctx.trigger_typing()
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://nekobot.xyz/api/v2/image/animepic") as r:
                res = await r.json()
        image = res["message"]
        color = await helpers.get_dominant_color(self.bot, image)
        em = discord.Embed(color=color)
        await ctx.send(embed=em.set_image(url=image))

    @commands.command()
    @commands.cooldown(1, 12, commands.BucketType.user)
    async def qr(self, ctx, *, message: str):
        """Generate a QR Code"""
        temp = BytesIO()
        qrcode.make(message).save(temp)
        temp.seek(0)
        await ctx.send(file=discord.File(temp, filename="qr.png"))

    @commands.command(aliases=["perms"])
    @commands.guild_only()
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def permissions(self, ctx, user: discord.Member = None, channel: str = None):
        """Get Permissions, Example Usage: n!permissions/n!perms @ひびき#0001 testing or n!permissions/n!perms ひびき#0001 #testing"""
        if user is None:
            user = ctx.message.author
        if channel is None:
            channel = ctx.message.channel
        else:
            channel = discord.utils.get(ctx.message.guild.channels, name=channel)
        # Zero-width space after @ prevents accidental mentions in the output.
        msg = "Perms for {} in {}: \n".format(user.name.replace("@", "@\u200B"),
                                              channel.name.replace("@", "@\u200B"))
        try:
            perms = user.permissions_in(channel)
            msg += ", ".join([x[0].replace("_", " ").title() for x in perms if x[1]])
            await ctx.send(msg)
        except:
            await ctx.send("Problem getting that channel...")

    @commands.command(aliases=["8"], name="8ball")
    @commands.cooldown(1, 3, commands.BucketType.user)
    async def _8ball(self, ctx,
"4354 4461 4536", 45562: "4354 4461 4537", 45563: "4354 4461 4538", 45564: "4354 4461 4539", 45565: "4354 4461 4540", 45566: "4354 4461 4541", 45567: "4354 4461 4542", 45568: "4354 4461 4543", 45569: "4354 4461 4544", 45570: "4354 4461 4545", 45571: "4354 4461 4546", 45572: "4354 4462", 45573: "4354 4462 4520", 45574: "4354 4462 4521", 45575: "4354 4462 4522", 45576: "4354 4462 4523", 45577: "4354 4462 4524", 45578: "4354 4462 4525", 45579: "4354 4462 4526", 45580: "4354 4462 4527", 45581: "4354 4462 4528", 45582: "4354 4462 4529", 45583: "4354 4462 4530", 45584: "4354 4462 4531", 45585: "4354 4462 4532", 45586: "4354 4462 4533", 45587: "4354 4462 4534", 45588: "4354 4462 4535", 45589: "4354 4462 4536", 45590: "4354 4462 4537", 45591: "4354 4462 4538", 45592: "4354 4462 4539", 45593: "4354 4462 4540", 45594: "4354 4462 4541", 45595: "4354 4462 4542", 45596: "4354 4462 4543", 45597: "4354 4462 4544", 45598: "4354 4462 4545", 45599: "4354 4462 4546", 45600: "4354 4463", 45601: "4354 4463 4520", 45602: "4354 4463 4521", 45603: "4354 4463 4522", 45604: "4354 4463 4523", 45605: "4354 4463 4524", 45606: "4354 4463 4525", 45607: "4354 4463 4526", 45608: "4354 4463 4527", 45609: "4354 4463 4528", 45610: "4354 4463 4529", 45611: "4354 4463 4530", 45612: "4354 4463 4531", 45613: "4354 4463 4532", 45614: "4354 4463 4533", 45615: "4354 4463 4534", 45616: "4354 4463 4535", 45617: "4354 4463 4536", 45618: "4354 4463 4537", 45619: "4354 4463 4538", 45620: "4354 4463 4539", 45621: "4354 4463 4540", 45622: "4354 4463 4541", 45623: "4354 4463 4542", 45624: "4354 4463 4543", 45625: "4354 4463 4544", 45626: "4354 4463 4545", 45627: "4354 4463 4546", 45628: "4354 4464", 45629: "4354 4464 4520", 45630: "4354 4464 4521", 45631: "4354 4464 4522", 45632: "4354 4464 4523", 45633: "4354 4464 4524", 45634: "4354 4464 4525", 45635: "4354 4464 4526", 45636: "4354 4464 4527", 45637: "4354 4464 4528", 45638: "4354 4464 4529", 45639: "4354 4464 4530", 45640: "4354 4464 4531", 45641: "4354 4464 
4532", 45642: "4354 4464 4533", 45643: "4354 4464 4534", 45644: "4354 4464 4535", 45645: "4354 4464 4536", 45646: "4354 4464 4537", 45647: "4354 4464 4538", 45648: "4354 4464 4539", 45649: "4354 4464 4540", 45650: "4354 4464 4541", 45651: "4354 4464 4542", 45652: "4354 4464 4543", 45653: "4354 4464 4544", 45654: "4354 4464 4545", 45655: "4354 4464 4546", 45656: "4354 4465", 45657: "4354 4465 4520", 45658: "4354 4465 4521", 45659: "4354 4465 4522", 45660: "4354 4465 4523", 45661: "4354 4465 4524", 45662: "4354 4465 4525", 45663: "4354 4465 4526", 45664: "4354 4465 4527", 45665: "4354 4465 4528", 45666: "4354 4465 4529", 45667: "4354 4465 4530", 45668: "4354 4465 4531", 45669: "4354 4465 4532", 45670: "4354 4465 4533", 45671: "4354 4465 4534", 45672: "4354 4465 4535", 45673: "4354 4465 4536", 45674: "4354 4465 4537", 45675: "4354 4465 4538", 45676: "4354 4465 4539", 45677: "4354 4465 4540", 45678: "4354 4465 4541", 45679: "4354 4465 4542", 45680: "4354 4465 4543", 45681: "4354 4465 4544", 45682: "4354 4465 4545", 45683: "4354 4465 4546", 45684: "4354 4466", 45685: "4354 4466 4520", 45686: "4354 4466 4521", 45687: "4354 4466 4522", 45688: "4354 4466 4523", 45689: "4354 4466 4524", 45690: "4354 4466 4525", 45691: "4354 4466 4526", 45692: "4354 4466 4527", 45693: "4354 4466 4528", 45694: "4354 4466 4529", 45695: "4354 4466 4530", 45696: "4354 4466 4531", 45697: "4354 4466 4532", 45698: "4354 4466 4533", 45699: "4354 4466 4534", 45700: "4354 4466 4535", 45701: "4354 4466 4536", 45702: "4354 4466 4537", 45703: "4354 4466 4538", 45704: "4354 4466 4539", 45705: "4354 4466 4540", 45706: "4354 4466 4541", 45707: "4354 4466 4542", 45708: "4354 4466 4543", 45709: "4354 4466 4544", 45710: "4354 4466 4545", 45711: "4354 4466 4546", 45712: "4354 4467", 45713: "4354 4467 4520", 45714: "4354 4467 4521", 45715: "4354 4467 4522", 45716: "4354 4467 4523", 45717: "4354 4467 4524", 45718: "4354 4467 4525", 45719: "4354 4467 4526", 45720: "4354 4467 4527", 45721: "4354 4467 4528", 45722: 
"4354 4467 4529", 45723: "4354 4467 4530", 45724: "4354 4467 4531", 45725: "4354 4467 4532", 45726: "4354 4467 4533", 45727: "4354 4467 4534", 45728: "4354 4467 4535", 45729: "4354 4467 4536", 45730: "4354 4467 4537", 45731: "4354 4467 4538", 45732: "4354 4467 4539", 45733: "4354 4467 4540", 45734: "4354 4467 4541", 45735: "4354 4467 4542", 45736: "4354 4467 4543", 45737: "4354 4467 4544", 45738: "4354 4467 4545", 45739: "4354 4467 4546", 45740: "4354 4468", 45741: "4354 4468 4520", 45742: "4354 4468 4521", 45743: "4354 4468 4522", 45744: "4354 4468 4523", 45745: "4354 4468 4524", 45746: "4354 4468 4525", 45747: "4354 4468 4526", 45748: "4354 4468 4527", 45749: "4354 4468 4528", 45750: "4354 4468 4529", 45751: "4354 4468 4530", 45752: "4354 4468 4531", 45753: "4354 4468 4532", 45754: "4354 4468 4533", 45755: "4354 4468 4534", 45756: "4354 4468 4535", 45757: "4354 4468 4536", 45758: "4354 4468 4537", 45759: "4354 4468 4538", 45760: "4354 4468 4539", 45761: "4354 4468 4540", 45762: "4354 4468 4541", 45763: "4354 4468 4542", 45764: "4354 4468 4543", 45765: "4354 4468 4544", 45766: "4354 4468 4545", 45767: "4354 4468 4546", 45768: "4354 4469", 45769: "4354 4469 4520", 45770: "4354 4469 4521", 45771: "4354 4469 4522", 45772: "4354 4469 4523", 45773: "4354 4469 4524", 45774: "4354 4469 4525", 45775: "4354 4469 4526", 45776: "4354 4469 4527", 45777: "4354 4469 4528", 45778: "4354 4469 4529", 45779: "4354 4469 4530", 45780: "4354 4469 4531", 45781: "4354 4469 4532", 45782: "4354 4469 4533", 45783: "4354 4469 4534", 45784: "4354 4469 4535", 45785: "4354 4469 4536", 45786: "4354 4469 4537", 45787: "4354 4469 4538", 45788: "4354 4469 4539", 45789: "4354 4469 4540", 45790: "4354 4469 4541", 45791: "4354 4469 4542", 45792: "4354 4469 4543", 45793: "4354 4469 4544", 45794: "4354 4469 4545", 45795: "4354 4469 4546", 45796: "4355 4449", 45797: "4355 4449 4520", 45798: "4355 4449 4521", 45799: "4355 4449 4522", 45800: "4355 4449 4523", 45801: "4355 4449 4524", 45802: "4355 4449 
4525", 45803: "4355 4449 4526", 45804: "4355 4449 4527", 45805: "4355 4449 4528", 45806: "4355 4449 4529", 45807: "4355 4449 4530", 45808: "4355 4449 4531", 45809: "4355 4449 4532", 45810: "4355 4449 4533", 45811: "4355 4449 4534", 45812: "4355 4449 4535", 45813: "4355 4449 4536", 45814: "4355 4449 4537", 45815: "4355 4449 4538", 45816: "4355 4449 4539", 45817: "4355 4449 4540", 45818: "4355 4449 4541", 45819: "4355 4449 4542", 45820: "4355 4449 4543", 45821: "4355 4449 4544", 45822: "4355 4449 4545", 45823: "4355 4449 4546", 45824: "4355 4450", 45825: "4355 4450 4520", 45826: "4355 4450 4521", 45827: "4355 4450 4522", 45828: "4355 4450 4523", 45829: "4355 4450 4524", 45830: "4355 4450 4525", 45831: "4355 4450 4526", 45832: "4355 4450 4527", 45833: "4355 4450 4528", 45834: "4355 4450 4529", 45835: "4355 4450 4530", 45836: "4355 4450 4531", 45837: "4355 4450 4532", 45838: "4355 4450 4533", 45839: "4355 4450 4534", 45840: "4355 4450 4535", 45841: "4355 4450 4536", 45842: "4355 4450 4537", 45843: "4355 4450 4538", 45844: "4355 4450 4539", 45845: "4355 4450 4540", 45846: "4355 4450 4541", 45847: "4355 4450 4542", 45848: "4355 4450 4543", 45849: "4355 4450 4544", 45850: "4355 4450 4545", 45851: "4355 4450 4546", 45852: "4355 4451", 45853: "4355 4451 4520", 45854: "4355 4451 4521", 45855: "4355 4451 4522", 45856: "4355 4451 4523", 45857: "4355 4451 4524", 45858: "4355 4451 4525", 45859: "4355 4451 4526", 45860: "4355 4451 4527", 45861: "4355 4451 4528", 45862: "4355 4451 4529", 45863: "4355 4451 4530", 45864: "4355 4451 4531", 45865: "4355 4451 4532", 45866: "4355 4451 4533", 45867: "4355 4451 4534", 45868: "4355 4451 4535", 45869: "4355 4451 4536", 45870: "4355 4451 4537", 45871: "4355 4451 4538", 45872: "4355 4451 4539", 45873: "4355 4451 4540", 45874: "4355 4451 4541", 45875: "4355 4451 4542", 45876: "4355 4451 4543", 45877: "4355 4451 4544", 45878: "4355 4451 4545", 45879: "4355 4451 4546", 45880: "4355 4452", 45881: "4355 4452 4520", 45882: "4355 4452 4521", 45883: 
"4355 4452 4522", 45884:
# (fragment: middle of a form-type update endpoint whose head is outside this
# chunk; Python 2 source — note the `print >>sys.stderr` below)
# ... settings are kept null
                    newFormType.form_type_group = None
                    newFormType.is_hierarchical = False
                    #We need to delete all of the child Forms parent references
                    remove_all_form_hierarchy_parent_references(newFormType)
                else:
                    newFormType.type = 0;
                #Update the form type's group
                #If it's a new group
                if post_data.get('ft_group') == 'NEW':
                    #Create a new formtype group
                    newFormTypeGroup = FormTypeGroup(name=request.POST['ft_group_new'], project=newFormType.project)
                    #Add the user information - We only set created by in endpoints that create the model for the first time
                    newFormTypeGroup.created_by = request.user
                    newFormTypeGroup.modified_by = request.user
                    newFormTypeGroup.save()
                    newFormType.form_type_group = newFormTypeGroup
                #If it's coded to remove the group, then set the field to null
                elif post_data.get('ft_group') == 'NONE':
                    newFormType.form_type_group = None
                #Otherwise it's not a new group and not being removed so use the provided value
                else:
                    newFormType.form_type_group = FormTypeGroup.objects.get(pk=request.POST['ft_group'])
                    # NOTE(review): leftover Python-2 debug print — remove or
                    # replace with proper logging.
                    print >>sys.stderr, "WTF!!!! " + post_data.get('ft_group')
                #update the formtypes status as hierarchical
                if 'is_hierarchical' in post_data:
                    newFormType.is_hierarchical = True
                else:
                    # NOTE(review): both branches assign True — the else branch
                    # almost certainly should set False (clearing the flag when
                    # the checkbox is absent from POST). Confirm and fix.
                    newFormType.is_hierarchical = True
                newFormType.save()
                #Update all of the FormType's FormRecordAttributeTypes
                for key in post_data:
                    # Form field names encode "<code>__<pk>__<instruction>".
                    splitKey = key.split("__")
                    if len(splitKey) == 3:
                        code,type_pk,instruction = splitKey
                        #If we are creating a new attribute type
                        if code == "frat" and instruction == "new":
                            newAttributeType = FormRecordAttributeType(record_type=post_data[key])
                            newAttributeType.form_type = newFormType
                            #Add the user information - We only set created by in endpoints that create the model for the first time
                            newAttributeType.created_by = request.user
                            newAttributeType.modified_by = request.user
                            newAttributeType.project = newFormType.project
                            if post_data[code + '__' + type_pk + '__order'] != "":
                                newAttributeType.order_number = int(post_data[code + '__' + type_pk + '__order'])
                            else:
                                #We need to give a random order number--if we don't, when Django attempts to order queries, it will get confused
                                #--if two of the attribute types share the same number. If they have more than 600 unique columns---it won't matter
                                #--anyway, because order just shows the first 5--this will just help the initial setup if someone doesn't set the
                                #--order fields at all.
                                newAttributeType.order_number = random.randint(399,999)
                            newAttributeType.save()
                        #If we are creating a new reference type
                        if code == "frrt" and instruction == "new":
                            newReferenceType = FormRecordReferenceType(record_type=post_data[key])
                            newReferenceType.form_type_parent = newFormType
                            newReferenceType.project = newFormType.project
                            #Add the user information - We only set created by in endpoints that create the model for the first time
                            newReferenceType.created_by = request.user
                            newReferenceType.modified_by = request.user
                            #we use the auto-incremented temp id used in the javascript form to match the refeerence value for this ref type
                            if post_data["nfrrt__"+type_pk+"__ref"] == "self-reference":
                                newReferenceType.form_type_reference = newFormType
                            elif post_data["nfrrt__"+type_pk+"__ref"] == "-1":
                                newReferenceType.form_type_reference = None
                            else:
                                newReferenceType.form_type_reference = FormType.objects.get(pk=post_data["nfrrt__"+type_pk+"__ref"])
                            if post_data['n' + code + '__' + type_pk + '__order'] != "":
                                newReferenceType.order_number = int(post_data['n' + code + '__' + type_pk + '__order'])
                            else:
                                #See explanation above ^^^^^^^^^ for this random int range
                                newReferenceType.order_number = random.randint(399,999)
                            newReferenceType.save()
                #SUCCESS!!
                return HttpResponse('{"MESSAGE":"SUCCESS!"}',content_type="application/json")
            else:
                ERROR_MESSAGE += "Error: You are trying to access the API without using a POST request."
        else:
            ERROR_MESSAGE += "Error: You do not have permission to access modifying form type information"
        #If anything goes wrong in the process, return an error in the json HTTP Response
        SECURITY_log_security_issues(request.user, 'admin.py - ' + str(sys._getframe().f_code.co_name), ERROR_MESSAGE, request.META)
        return HttpResponse('{"ERROR":"'+ ERROR_MESSAGE +'","row_index":"0","is_complete":"True", "row_total":"0", "row_timer":"0"}',content_type="application/json")

    #=======================================================#
    #   ACCESS LEVEL :   3       SAVE_FORM_TYPE_CHANGES()
    #=======================================================#
    def save_form_type_changes(self, request):
        # (continues past this chunk boundary)
        #***************#
        ACCESS_LEVEL = 3
        #***************#
        #----------------------------------------------------------------------------------------------------------------------------
        #   This endpoint takes in POST data submitted by a form type editing page and makes the necessary changes. It also handles
        #   --any tools in the form type editor, e.g. changing a attribute RTYPE to a refrence RTYPE. Another Endpoint handles creating NEW
        #   --formtypes. This is only used for editing.
        #
        #   It requires a level 3 access to make form type changes. We also put in a project restriction on the formtype constrained by the
        #   --project ID in the user's permissions. If the formtype query set is 0 in length, then this endpoint will return an error
        ERROR_MESSAGE = ""
        #Check our user's session and access level
        if SECURITY_check_user_permissions(ACCESS_LEVEL, request.user.permissions.access_level):
            if request.method == 'POST':
                deletedObjects = {}
                formTypeToEdit = FormType.objects.get(pk=request.POST['formtype_pk'])
                if formTypeToEdit.project.pk == request.user.permissions.project.pk:
                    post_data = request.POST
                    #Update the form's basic attributes
                    formTypeToEdit.form_type_name = post_data.get('form_type_name')
                    #Add the user information
                    formTypeToEdit.modified_by = request.user
                    #add the appropriate flag for the formtype's har-coded type: e.g.
is is a media or control group? print >>sys.stderr, post_data.get('formtype-type') if post_data.get('ft_media_type') != '-1':#media formTypeToEdit.type = 1; #also add the media type, e.g. img/pdf/3d etc. formTypeToEdit.media_type = post_data.get('ft_media_type') formTypeToEdit.file_extension = post_data.get('file_extension') #If there is a URI prefix then add one--otherwise set it to None if 'uri_prefix' in post_data: if post_data['uri_prefix'] != "" or post_data['uri_prefix'] != " ": formTypeToEdit.uri_prefix = post_data['uri_prefix'] else: formTypeToEdit.uri_prefix = None #Make sure that the hierarchy and group settings are kept null formTypeToEdit.form_type_group = None formTypeToEdit.is_hierarchical = False #We need to delete all of the child Forms parent references remove_all_form_hierarchy_parent_references(formTypeToEdit) else: formTypeToEdit.type = 0; #standard formtype #Update the form type's group #If it's a new group if post_data.get('ft_group') == 'NEW': #Create a new formtype group newFormTypeGroup = FormTypeGroup(name=post_data.get('ft_group_new'), project=request.user.permissions.project) #Add the user information newFormTypeGroup.modified_by = request.user newFormTypeGroup.created_by = request.user newFormTypeGroup.save() formTypeToEdit.form_type_group = newFormTypeGroup #If it's coded to remove the group, then set the field to null elif post_data.get('ft_group') == 'NONE': formTypeToEdit.form_type_group = None #Otherwise it's not a new group and not being removed so use the provided value else: formTypeToEdit.form_type_group = FormTypeGroup.objects.get(pk=post_data.get('ft_group')) print >>sys.stderr, "WTF!!!! 
" + post_data.get('ft_group') #update the formtypes status as hierarchical if 'is_hierarchical' in post_data: formTypeToEdit.is_hierarchical = True else: formTypeToEdit.is_hierarchical = False #Save the formtype formTypeToEdit.save() #Update all of the form's FormRecordAttributeTypes for key in post_data: splitKey = key.split("__") if len(splitKey) > 1: #-------------------------------------------------------------------------------------------------------- #Update all of the form's FormRecordAttributeTypes #-------------------------------------------------------------------------------------------------------- # $$SS-VALIDATION$$ This "If" checks to make sure no keys that have been removed for different reasons are used going forward $$ logging.info("CURRENT KEY: " + key + "Is in deleted objects?") print >> sys.stderr, "Fucking keys = ?? ", for akey in deletedObjects: print >> sys.stderr, akey+", ", print >>sys.stderr, " " if key not in deletedObjects: if len(splitKey) == 2: code,type_pk = splitKey if code == "frat": currentAttributeType = FormRecordAttributeType.objects.get(pk=type_pk) currentAttributeType.record_type = post_data[key] if post_data[key + '__order'] != "": currentAttributeType.order_number = int(post_data[key + '__order']) else: #We need to give a random order number--if we don't, when Django attempts to order queries, it will get confused #--if two of the attribute types share the same number. If they have more than 600 unique columns---it won't matter #--anyway, because order just shows the first 5--this will just help the initial setup if someone doesn't set the #--order fields at all. 
currentAttributeType.order_number = random.randint(399,999) #Add the user information currentAttributeType.modified_by = request.user currentAttributeType.save() if len(splitKey) == 3: code,type_pk,instruction = splitKey #If we are creating a new attribute type if code == "frat" and instruction == "new": newAttributeType = FormRecordAttributeType(record_type=post_data[key]) newAttributeType.form_type = formTypeToEdit if post_data[code + '__' + type_pk + '__order'] != "": newAttributeType.order_number = int(post_data[code + '__' + type_pk + '__order']) else: #We need to give a random order number--if we don't, when Django attempts to order queries, it will get confused #--if two of the attribute types share the same number. If they have more than 600 unique columns---it won't matter #--anyway, because order just shows the first 5--this will just help the initial setup if someone doesn't set the #--order fields at all. newAttributeType.order_number = random.randint(399,999) #Add the user information newAttributeType.modified_by = request.user newAttributeType.created_by = request.user newAttributeType.save() #TODO: Techincally all related forms to this formtype won't have an attached value until edited on the admin page #Should I go ahead and add a null attribute value? #If we are getting an instruction from the user to delete this attribute type then delete it elif code== "frat" and instruction == "DEL": #*** RECYCLING BIN *** pass this FRAT to the recycling bin recycledFRAT = FormRecordAttributeType.objects.get(pk=type_pk) recycledFRAT.flagged_for_deletion = True recycledFRAT.save();
if len(data_settings.i_coords) == 0: if warning_strings != None: warning_strings.append( 'WARNING(dump2data): atom_style unknown. (Use -atomstyle style. Assuming \"full\")') warn_atom_style_unspecified = True # The default atom_style is "full" data_settings.column_names = AtomStyle2ColNames('full') ii_coords = ColNames2Coords(data_settings.column_names) # This program assumes that there is only one coordinate triplet # (x,y,z) for each atom. Hence we assume that len(ii_coords)==1 assert(len(ii_coords) == 1) data_settings.i_coords = ii_coords[0] data_settings.ii_vects = ColNames2Vects(data_settings.column_names) data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid( data_settings.column_names) # sys.stderr.write('########################################################\n' # '## WARNING: atom_style unspecified ##\n' # '## --> \"Atoms\" column data has an unknown format. ##\n' # '## Assuming atom_style = \"full\" ##\n' # '########################################################\n' # '## To specify the \"Atoms\" column format you can: ##\n' # '## 1) Use the -atom_style \"STYLE\" argument ##\n' # '## where \"STYLE\" is a string indicating a LAMMPS ##\n' # '## atom_style, including hybrid styles.(Standard ##\n' # '## atom styles defined in 2011 are supported.) ##\n' # '## 2) Use the -atom_style \"COL_LIST\" argument ##\n' # '## where \"COL_LIST" is a quoted list of strings ##\n' # '## indicating the name of each column. ##\n' # '## Names \"x\",\"y\",\"z\" are interpreted as ##\n' # '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n' # '## and \"quati\",\"quatj\",\"quatk\" are ##\n' # '## interpreted as direction vectors. ##\n' # '## 3) Use the -icoord \"cx cy cz...\" argument ##\n' # '## where \"cx cy cz\" is a list of integers ##\n' # '## indicating the column numbers for the x,y,z ##\n' # '## coordinates of each atom. 
#                  '##              coordinates of each atom.             ##\n'
#                  '##    4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n'
#                  '##       where \"cmux cmuy cmuz...\" is a list of      ##\n'
#                  '##       integers indicating the column numbers for   ##\n'
#                  '##       the vector that determines the direction of a##\n'
#                  '##       dipole or ellipsoid (ie. a rotateable vector).##\n'
#                  '##       (More than one triplet can be specified. The ##\n'
#                  '##       number of entries must be divisible by 3.)   ##\n'
#                  '##    5) Include a                                    ##\n'
#                  '##       write(\"in_init.txt\"){atom_style ...}        ##\n'
#                  '##       statement in your .ttree file.               ##\n'
#                  '########################################################\n')


def GetIntAtomID(pair):
    """Return pair[0] converted to int.

    Presumably used as a sort key so that (atom-id, ...) pairs sort
    numerically rather than lexicographically -- TODO confirm with caller.
    """
    return int(pair[0])


def WriteFrameToData(out_file,
                     descr_str,
                     misc_settings,
                     data_settings,
                     dump_column_names,
                     natoms,
                     coords,
                     coords_ixiyiz,
                     vects,
                     velocities,
                     atomtypes,
                     molids,
                     xlo_str, xhi_str,
                     ylo_str, yhi_str,
                     zlo_str, zhi_str,
                     xy_str, xz_str, yz_str):
    """
    Open a data file.  Read the LAMMPS DATA file line by line.
    When the line contains information which is also in the dump file,
    replace that information with information from the dump file.
    (Information from a dump file is stored in the arguments
    to this function.)
    The resulting file also has LAMMPS DATA format.

    Notes (review):
      - coords/vects/velocities/atomtypes/molids are keyed by the atom-id
        string taken from column 0 of each "Atoms"/"Velocities" line.
      - The *_str arguments are pre-formatted box-bound strings; a value of
        None means "keep the bounds already present in the data file".
      - Raises InputError (project-defined) when the data file's column
        layout does not match the declared atom_style.
    """
    section = ''
    firstline = True
    for line in data_settings.contents:
        # Strip trailing comments and surrounding whitespace first;
        # an empty result means the line carried no data.
        ic = line.find('#')
        if ic != -1:
            line = line[:ic]
        line = line.strip()
        if firstline:
            # Construct a new descriptive header line:
            if descr_str != None:
                line = descr_str
            firstline = False
        if (len(line) > 0):
            # The initial section (section='') is assumed to be
            # the "LAMMPS Description" section. This is where the
            # box boundaries are specified.
            if section == '':
                tokens = line.split()
                # Replace "xlo xhi" / "ylo yhi" / "zlo zhi" / "xy xz yz"
                # bound lines only when replacement strings were supplied.
                if ((len(tokens) >= 2) and
                        ((tokens[-2] == 'xlo') and (tokens[-1] == 'xhi')) and
                        ((xlo_str != None) and (xhi_str != None))):
                    tokens[0] = xlo_str
                    tokens[1] = xhi_str
                    line = ' '.join(tokens)
                elif ((len(tokens) >= 2) and
                        ((tokens[-2] == 'ylo') and (tokens[-1] == 'yhi')) and
                        ((ylo_str != None) and (yhi_str != None))):
                    tokens[0] = ylo_str
                    tokens[1] = yhi_str
                    line = ' '.join(tokens)
                elif ((len(tokens) >= 2) and
                        ((tokens[-2] == 'zlo') and (tokens[-1] == 'zhi')) and
                        ((zlo_str != None) and (zhi_str != None))):
                    tokens[0] = zlo_str
                    tokens[1] = zhi_str
                    line = ' '.join(tokens)
                elif ((len(tokens) >= 3) and
                        ((tokens[-3] == 'xy') and
                         (tokens[-2] == 'xz') and
                         (tokens[-1] == 'yz')) and
                        ((xy_str != None) and
                         (xz_str != None) and
                         (yz_str != None))):
                    # Triclinic tilt factors.
                    tokens[0] = xy_str
                    tokens[1] = xz_str
                    tokens[2] = yz_str
                    line = ' '.join(tokens)
            # A line consisting solely of a known section keyword starts
            # a new section; anything else is data for the current one.
            if (line in set(['Masses', 'Velocities', 'Atoms',
                             'Bond Coeffs', 'Angle Coeffs',
                             'Dihedral Coeffs', 'Improper Coeffs',
                             'Bonds', 'Angles', 'Dihedrals', 'Impropers'])):
                section = line
            else:
                if (section == 'Atoms'):
                    tokens = line.split()
                    atomid = tokens[0]
                    # update the atomtype and molID
                    # (which may change during the simulation)
                    if atomtypes:
                        tokens[data_settings.i_atomtype] = atomtypes[atomid]
                    if molids and data_settings.i_molid:
                        tokens[data_settings.i_molid] = molids[atomid]
                    if atomid in coords:
                        # Loop over all of the vector degrees of
                        # freedom of the particle, excluding coords
                        # (for example: mu_x, mu_y, mu_z,
                        #  or quat_i, quat_j, quat_k)
                        # In principle, depending on the atom_style,
                        # there could be multiple vectors per atom.
                        for I in range(0, len(data_settings.ii_vects)):
                            i_vx = data_settings.ii_vects[I][0]
                            i_vy = data_settings.ii_vects[I][1]
                            i_vz = data_settings.ii_vects[I][2]
                            if atomid in vects:
                                vxvyvz = vects[atomid][I]
                                assert((type(vxvyvz) is tuple) and
                                       (len(vxvyvz) == 3))
                                if ((i_vx >= len(tokens)) or
                                        (i_vy >= len(tokens)) or
                                        (i_vz >= len(tokens))):
                                    raise InputError('Error(dump2data): Atom style incompatible with data file.\n'
                                                     ' Specify the atom_style using -atomstyle style.\n')
                                # Replace the vector components with numbers
                                # from the dump file
                                tokens[i_vx] = vxvyvz[0]
                                tokens[i_vy] = vxvyvz[1]
                                tokens[i_vz] = vxvyvz[2]
                            else:
                                if (dump_column_names and
                                        (data_settings.column_names[i_vx] not in dump_column_names)):
                                    raise InputError('Error(dump2data): You have a vector coordinate in your DATA file named \"' + data_settings.column_names[i_vx] + '\"\n'
                                                     ' However there are no columns with this name in your DUMP file\n'
                                                     ' (or the column was not in the expected place).\n'
                                                     ' Hence, the atom styles in the dump and data files do not match.')
                        # Now loop over the coordinates of each atom.
                        # for I in range(0,len(data_settings.ii_coords)):
                        #    xyz = coords[atomid][I]
                        #      THIS LOOP IS SILLY.
                        #      EACH ATOM ONLY HAS ONE SET OF X,Y,Z
                        #      COORDINATES. COMMENTING OUT THIS LOOP:
                        #    i_x = data_settings.ii_coords[I][0]
                        #    i_y = data_settings.ii_coords[I][1]
                        #    i_z = data_settings.ii_coords[I][2]
                        # USING THIS INSTEAD:
                        xyz = coords[atomid]
                        i_x = data_settings.i_coords[0]
                        i_y = data_settings.i_coords[1]
                        i_z = data_settings.i_coords[2]
                        if ((i_x >= len(tokens)) or
                                (i_y >= len(tokens)) or
                                (i_z >= len(tokens))):
                            raise InputError('Error(dump2data): Atom style incompatible with data file.\n'
                                             ' Specify the atom_style using -atomstyle style.\n')
                        # Replace the coordinates with coordinates from
                        # the dump file into tokens[i_x]...
                        tokens[i_x] = str(xyz[0])
                        tokens[i_y] = str(xyz[1])
                        tokens[i_z] = str(xyz[2])
                        # Are there there any integer coords
                        # (ix, iy, iz) in the dump file?
                        if coords_ixiyiz[atomid]:
                            assert(len(coords_ixiyiz[atomid]) == 3)
                            # Integer coords stored in the DATA file too?
                            if len(tokens) == (len(data_settings.column_names) + 3):
                                # Then replace the last 3 columns of the
                                # line in the data file with: ix iy iz
                                tokens[-3] = coords_ixiyiz[atomid][0]
                                tokens[-2] = coords_ixiyiz[atomid][1]
                                tokens[-1] = coords_ixiyiz[atomid][2]
                            else:
                                if (not misc_settings.center_frame):
                                    # Append them to the end of the line:
                                    tokens.append(coords_ixiyiz[atomid][0])
                                    tokens.append(coords_ixiyiz[atomid][1])
                                    tokens.append(coords_ixiyiz[atomid][2])
                        # Now finally paste all the tokens together:
                        line = ' '.join(tokens)
                elif (section == 'Velocities'):
                    tokens = line.split()
                    atomid = tokens[0]
                    if atomid in velocities:
                        vxvyvz = velocities[atomid]
                        if len(tokens) < 4:
                            raise InputError(
                                'Error(dump2data): Not enough columns in the \"Velocities\" file.\n')
                        # Replace the coordinates with coordinates from
                        # the dump file into tokens[i_x]...
                        tokens[1] = str(vxvyvz[0])
                        tokens[2] = str(vxvyvz[1])
                        tokens[3] = str(vxvyvz[2])
                        # Now finally paste all the tokens together:
                        line = ' '.join(tokens)
        out_file.write(line + '\n')
    return


def main():
    sys.stderr.write(g_program_name + ' v' +
                     g_version_str + ' ' + g_date_str + ' ')
    # if sys.version < '3':
    #    sys.stderr.write(' (python version < 3)\n')
    # else:
    sys.stderr.write('\n')
    try:
        data_settings = DataSettings()
        misc_settings = MiscSettings()
        warning_strings = []
        ParseArgs(sys.argv, misc_settings, data_settings, warning_strings)
        # Open the lammps dump file (trajectory file)
        # Skip to the line containing the correct frame/timestep.
        # (this is the last frame by default).
        # Read the "BOX BOUNDS" and the "ATOMS" sections.
        # Store the x,y,z coordinates in the "coords" associative array
        # (indexed by atom id, which could be non-numeric in general).
section = '' #coords = defaultdict(list) #coords_ixiyiz = defaultdict(list) #vects = defaultdict(list) #xlo_str = xhi_str = ylo_str = yhi_str = zlo_str = zhi_str = None #xy_str = xz_str = yz_str = None #natoms = -1 #timestep_str = '' frame_coords = defaultdict(list) frame_coords_ixiyiz = defaultdict(list)
# File: greynoise_connector.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)

# Python 3 Compatibility imports
from __future__ import print_function, unicode_literals

# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from greynoise_consts import *
import requests
import json
from requests.utils import requote_uri
from six.moves.urllib.parse import urljoin as _urljoin
import urllib.parse


def urljoin(base, url):
    # Join base and url while guaranteeing exactly one '/' at the seam,
    # regardless of trailing/leading slashes on either argument.
    return _urljoin("%s/" % base.rstrip("/"), url.lstrip("/"))


class GreyNoiseConnector(BaseConnector):
    """Connector for GreyNoise App.

    Implements the Phantom/SOAR actions: test_connectivity, lookup_ip,
    ip_reputation, gnql_query, lookup_ips and on_poll (see handle_action).
    """

    def __init__(self):
        """GreyNoise App Constructor."""
        super(GreyNoiseConnector, self).__init__()
        # Lazily-created requests.Session (see get_session).
        self._session = None
        # App version string read from the app JSON in initialize().
        self._app_version = None
        # API key read from the asset config in initialize().
        self._api_key = None

    def validate_parameters(self, param):
        # Disable BaseConnector's validate functionality, since this App supports unicode domains and the
        # validation routines don't
        return phantom.APP_SUCCESS

    def _get_error_message_from_exception(self, e):
        """ This method is used to get appropriate error messages from the exception.
        :param e: Exception object
        :return: error message
        """
        try:
            if e.args:
                if len(e.args) > 1:
                    error_code = e.args[0]
                    error_msg = e.args[1]
                elif len(e.args) == 1:
                    error_code = ERR_CODE_MSG
                    error_msg = e.args[0]
            else:
                error_code = ERR_CODE_MSG
                error_msg = ERR_MSG_UNAVAILABLE
        # Broad except: exceptions with unusual .args shapes fall back to
        # the generic message rather than raising while building an error.
        except:
            error_code = ERR_CODE_MSG
            error_msg = ERR_MSG_UNAVAILABLE

        try:
            # NOTE(review): this is a membership test ("in") against
            # ERR_CODE_MSG, not an equality test -- presumably ERR_CODE_MSG
            # is a string constant and error_code may be a substring of it.
            # Verify the intent against greynoise_consts.
            if error_code in ERR_CODE_MSG:
                error_text = "Error Message: {0}".format(error_msg)
            else:
                error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        except:
            self.debug_print(PARSE_ERR_MSG)
            error_text = PARSE_ERR_MSG

        return error_text

    def _validate_integer(self, action_result, parameter, key):
        # Validate that `parameter` is a non-negative integer.
        # Returns (status, int_value) -- value is None on failure.
        if parameter:
            try:
                if not float(parameter).is_integer():
                    return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key=key)), None
                parameter = int(parameter)
            except:
                return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key=key)), None
            if parameter < 0:
                return action_result.set_status(phantom.APP_ERROR, NON_NEGATIVE_INTEGER_MSG.format(key=key)), None
        return phantom.APP_SUCCESS, parameter

    def get_session(self):
        # Create the shared HTTP session on first use; the api-key is sent
        # as a query parameter on every request made through it.
        if self._session is None:
            self._session = requests.Session()
            self._session.params.update({
                "api-key": self._api_key
            })
        return self._session

    def _make_rest_call(self, action_result, method, *args, error_on_404=True, **kwargs):
        """Perform one HTTP request and parse the JSON body.

        Returns (status, response_json_or_None, status_code_or_None).
        A 404 is tolerated (not raised) when error_on_404 is False.
        """
        session = self.get_session()
        response_json = None
        status_code = None
        try:
            r = session.request(method, *args, **kwargs)
            if r.status_code != 404 or error_on_404:
                r.raise_for_status()
            status_code = r.status_code
        except requests.exceptions.HTTPError as e:
            err_msg = self._get_error_message_from_exception(e)
            # Unquote because the URL (and any %-escapes) may appear in the
            # HTTPError text.
            err_msg = urllib.parse.unquote(err_msg)
            ret_val = action_result.set_status(phantom.APP_ERROR, "HTTP error occurred while making REST call: {0}".format(err_msg))
        except Exception as e:
            err_msg = self._get_error_message_from_exception(e)
            ret_val = action_result.set_status(phantom.APP_ERROR, "General error occurred while making REST call: {0}".format(err_msg))
        else:
            try:
                response_json = r.json()
                ret_val = phantom.APP_SUCCESS
            except Exception as e:
                err_msg = self._get_error_message_from_exception(e)
                ret_val = action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".format(err_msg))
        return (ret_val, response_json, status_code)

    def _check_apikey(self, action_result):
        # Ping the API; a {"message": "pong"} response means the key works.
        self.save_progress("Testing API key")
        ret_val, response_json, status_code = self._make_rest_call(
            action_result, "get", API_KEY_CHECK_URL, headers=self._headers
        )
        if phantom.is_fail(ret_val):
            self.save_progress("API key check Failed")
            return ret_val
        if response_json is None:
            self.save_progress("No response from API")
            return action_result.set_status(phantom.APP_ERROR, "No response from API")
        elif response_json.get("message") == "pong":
            self.save_progress("Validated API Key")
            self.debug_print("Validated API Key")
            return phantom.APP_SUCCESS
        else:
            self.save_progress("Invalid response from API")
            try:
                response_json = json.dumps(response_json)
            except:
                return action_result.set_status(phantom.APP_ERROR, "Invalid response from API")
            return action_result.set_status(phantom.APP_ERROR, "Invalid response from API: %s" % response_json)

    def _test_connectivity(self, param):
        # 'test connectivity' action: succeeds iff the API key validates.
        action_result = self.add_action_result(ActionResult(dict(param)))
        ret_val = self._check_apikey(action_result)
        if phantom.is_fail(ret_val):
            self.save_progress("Test Connectivity Failed")
            return ret_val
        self.save_progress("Test Connectivity Passed")
        return action_result.set_status(phantom.APP_SUCCESS)

    def _lookup_ip(self, param):
        # 'lookup ip' action: quick-check a single IP and map its code.
        action_result = self.add_action_result(ActionResult(dict(param)))
        ret_val = self._check_apikey(action_result)
        if phantom.is_fail(ret_val):
            return ret_val
        ret_val, response_json, status_code = self._make_rest_call(
            action_result, "get", LOOKUP_IP_URL.format(ip=param["ip"]), headers=self._headers
        )
        if phantom.is_fail(ret_val):
            return ret_val
        result_data = {}
        action_result.add_data(result_data)
        result_data.update(response_json)
        try:
            result_data["visualization"] = VISUALIZATION_URL.format(ip=result_data["ip"])
            if result_data["code"] in CODES:
                result_data["code_meaning"] = CODES[result_data["code"]]
            else:
                result_data["code_meaning"] = "This code is unmapped"
        except KeyError:
            return action_result.set_status(phantom.APP_ERROR, "Error occurred while processing API response")
        return action_result.set_status(phantom.APP_SUCCESS)

    def _ip_reputation(self, param):
        # 'ip reputation' action: full context lookup for a single IP.
        action_result = self.add_action_result(ActionResult(dict(param)))
        ret_val = self._check_apikey(action_result)
        if phantom.is_fail(ret_val):
            return ret_val
        ret_val, response_json, status_code = self._make_rest_call(
            action_result, "get", IP_REPUTATION_URL.format(ip=param["ip"]), headers=self._headers
        )
        if phantom.is_fail(ret_val):
            return ret_val
        result_data = {}
        action_result.add_data(result_data)
        result_data.update(response_json)
        try:
            result_data["visualization"] = VISUALIZATION_URL.format(ip=result_data["ip"])
        except KeyError:
            return action_result.set_status(phantom.APP_ERROR, "Error occurred while processing API response")
        return action_result.set_status(phantom.APP_SUCCESS)

    def _gnql_query(self, param, is_poll=False, action_result=None):
        """Run a GNQL query, following the scroll token until `size` results
        are collected or the API reports completion.

        When is_poll is True the caller supplies action_result and this
        returns (status, result_data); otherwise it returns just a status.
        """
        if not is_poll:
            action_result = self.add_action_result(ActionResult(dict(param)))
        ret_val = self._check_apikey(action_result)
        if phantom.is_fail(ret_val):
            if is_poll:
                return ret_val, None
            else:
                return ret_val
        first_flag = True
        remaining_results_flag = True
        scroll_token = ""
        full_response = {}
        size = param["size"]
        # Validate 'size' action parameter
        ret_val, size = self._validate_integer(action_result, size, SIZE_ACTION_PARAM)
        if phantom.is_fail(ret_val):
            if is_poll:
                return action_result.get_status(), None
            else:
                return action_result.get_status()
        while remaining_results_flag:
            if first_flag:
                # First page: no scroll token yet.
                ret_val, response_json, status_code = self._make_rest_call(
                    action_result, "get", GNQL_QUERY_URl,
                    headers=self._headers,
                    params=(('query', param["query"]), ('size', size))
                )
                # NOTE(review): response_json is merged before ret_val is
                # checked; if the REST call failed response_json may be None
                # and update() would raise. Confirm whether this relies on
                # the is_fail check at the bottom of the loop.
                full_response.update(response_json)
                if "scroll" in full_response:
                    scroll_token = full_response["scroll"]
                if "complete" in full_response or len(full_response["data"]) >= size:
                    remaining_results_flag = False
                elif "message" in full_response:
                    if full_response["message"] == "no results":
                        remaining_results_flag = False
            first_flag = False
            if remaining_results_flag:
                # Subsequent pages: pass the scroll token from the last page.
                ret_val, response_json, status_code = self._make_rest_call(
                    action_result, "get", GNQL_QUERY_URl,
                    headers=self._headers,
                    params=(('query', param["query"]), ('size', size), ('scroll', scroll_token))
                )
                full_response["complete"] = response_json["complete"]
                if "scroll" in response_json:
                    full_response["scroll"] = response_json["scroll"]
                for item in response_json["data"]:
                    full_response["data"].append(item)
                if "scroll" in full_response:
                    scroll_token = full_response["scroll"]
                if "complete" in full_response or len(full_response["data"]) >= size:
                    remaining_results_flag = False
                elif "message" in full_response:
                    if full_response["message"] == "no results":
                        remaining_results_flag = False
                    else:
                        remaining_results_flag = True
            if phantom.is_fail(ret_val):
                if is_poll:
                    return ret_val, None
                else:
                    return ret_val
        result_data = {}
        action_result.add_data(result_data)
        try:
            # Attach a visualizer link to every returned entry.
            for entry in full_response["data"]:
                entry["visualization"] = VISUALIZATION_URL.format(ip=entry["ip"])
        except KeyError:
            error_msg = "Error occurred while processing API response"
            if is_poll:
                return action_result.set_status(phantom.APP_ERROR, error_msg), None
            else:
                return action_result.set_status(phantom.APP_ERROR, error_msg)
        result_data.update(full_response)
        if is_poll:
            return ret_val, result_data
        else:
            return action_result.set_status(phantom.APP_SUCCESS)

    def _lookup_ips(self, param):
        # 'lookup ips' action: bulk quick-check of a comma-separated IP list.
        action_result = self.add_action_result(ActionResult(dict(param)))
        ret_val = self._check_apikey(action_result)
        if phantom.is_fail(ret_val):
            return ret_val
        try:
            ips = [x.strip() for x in param["ips"].split(",")]
            ips = list(filter(None, ips))
            if not ips:
                return action_result.set_status(phantom.APP_ERROR, INVALID_COMMA_SEPARATED_VALUE_ERR_MSG.format(key='ips'))
            ips = ",".join(ips)
            # Percent-encode the list for use in the request URL.
            ips_string = requote_uri(ips)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            err_msg = "Error occurred while processing 'ips' action parameter. {0}".format(err)
            return action_result.set_status(phantom.APP_ERROR, err_msg)
        ret_val, response_json, status_code = self._make_rest_call(
            action_result, "get", LOOKUP_IPS_URL.format(ips=ips_string), headers=self._headers
        )
        if phantom.is_fail(ret_val):
            return ret_val
        result_data = []
        action_result.add_data(result_data)
        try:
            for result in response_json:
                if result["code"] in CODES:
                    result["code_meaning"] = CODES[result["code"]]
                else:
                    result["code_meaning"] = "This code is unmapped"
                result["visualization"] = VISUALIZATION_URL.format(ip=result["ip"])
                result_data.append(result)
            return action_result.set_status(phantom.APP_SUCCESS)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            err_msg = "Error occurred while processing results: {0}".format(err)
            return action_result.set_status(phantom.APP_ERROR, err_msg)

    def _process_query(self, data):
        # spawn container for every item returned
        if data["count"] > 0:
            try:
                for entry in data["data"]:
                    ip = entry["ip"]
                    self.save_progress("Processing IP address {}".format(ip))
                    container = {
                        "custom_fields": {},
                        "data": {},
                        "name": "",
                        "description": "Container added by GreyNoise",
                        "label": self.get_config().get("ingest", {}).get("container_label"),
                        "sensitivity": "amber",
                        "source_data_identifier": "",
                        "tags": entry["tags"],
                    }
                    # Severity mirrors GreyNoise's classification field.
                    if entry["classification"] == "malicious":
                        container["severity"] = "high"
                    else:
                        container["severity"] = "low"
                    artifact_cef = {
                        'ip': entry['ip'],
                        'classification': entry['classification'],
                        'first_seen': entry['first_seen'],
                        'last_seen': entry['last_seen'],
                        'actor': entry['actor'],
                        'organization': entry['metadata']['organization'],
                        'asn': entry['metadata']['asn']
                    }
                    # country/city are optional in the metadata block.
                    if entry['metadata']['country']:
                        artifact_cef['country'] = entry['metadata']['country']
                    if entry['metadata']['city']:
                        artifact_cef['city'] = entry['metadata']['city']
                    container["artifacts"] = [{
                        "cef": artifact_cef,
                        "description": "Artifact added by GreyNoise",
                        "label": container["label"],
                        "name": "GreyNoise Query Language Entry",
                        "source_data_identifier": container["source_data_identifier"],
                        "severity": container["severity"]
                    }]
                    container["name"] = "GreyNoise Query Language Entry"
                    ret_val, container_creation_msg, container_id = self.save_container(container)
                    if phantom.is_fail(ret_val):
                        # A single failed container does not abort the batch.
                        self.save_progress("Error occurred while saving the container")
                        self.debug_print(container_creation_msg)
                        continue
                    self.save_progress("Created %s" % container_id)
            except Exception as e:
                err = self._get_error_message_from_exception(e)
                err_msg = "Error occurred while processing query data. {}".format(err)
                self.debug_print(err_msg)
                return phantom.APP_ERROR
            return phantom.APP_SUCCESS
        else:
            self.save_progress("No results matching your GNQL query were found")
            return phantom.APP_SUCCESS

    def _on_poll(self, param):
        # 'on poll' ingestion: run the configured GNQL query and create one
        # container per result via _process_query.
        action_result = self.add_action_result(ActionResult(dict(param)))
        if self.is_poll_now():
            self.save_progress('Due to the nature of the API, the '
                               'artifact limits imposed by POLL NOW are '
                               'ignored. As a result POLL NOW will simply '
                               'create a container for each artifact.')
        config = self.get_config()
        param["query"] = config.get("on_poll_query")
        if self.is_poll_now():
            param["size"] = param.get(phantom.APP_JSON_CONTAINER_COUNT, 25)
        else:
            on_poll_size = config.get("on_poll_size", 25)
            # Validate 'on_poll_size' config parameter
            ret_val, on_poll_size = self._validate_integer(action_result, on_poll_size, ONPOLL_SIZE_CONFIG_PARAM)
            if phantom.is_fail(ret_val):
                return action_result.get_status()
            param["size"] = on_poll_size
        # Reject the placeholder query shipped with the default config.
        if param["query"] == "Please refer to the documentation":
            self.save_progress("Default on poll query unchanged, please enter a valid GNQL query")
            return action_result.set_status(phantom.APP_ERROR, "Default on poll query unchanged")
        ret_val, data = self._gnql_query(param, is_poll=True, action_result=action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        ret_val = self._process_query(data)
        if phantom.is_fail(ret_val):
            return action_result.set_status(phantom.APP_ERROR, "Failed to process the query")
        else:
            return action_result.set_status(phantom.APP_SUCCESS)

    def handle_action(self, param):
        # Dispatch the incoming action identifier to its handler.
        ret_val = phantom.APP_SUCCESS
        action = self.get_action_identifier()
        if action == "test_connectivity":
            ret_val = self._test_connectivity(param)
        elif action == "lookup_ip":
            ret_val = self._lookup_ip(param)
        elif action == "ip_reputation":
            ret_val = self._ip_reputation(param)
        elif action == "gnql_query":
            ret_val = self._gnql_query(param)
        elif action == "lookup_ips":
            ret_val = self._lookup_ips(param)
        elif action == "on_poll":
            ret_val = self._on_poll(param)
        return ret_val

    def initialize(self):
        """Initialize the Phantom integration."""
        self._state = self.load_state()
        config = self.get_config()
        self._api_key = config['api_key']
        app_json = self.get_app_json()
        self._app_version = app_json["app_version"]
        # The key is sent both as a header here and as a query param by
        # get_session() -- NOTE(review): confirm which one the API uses.
        self._headers = {
            "Accept": "application/json",
            "key": self._api_key,
            "User-Agent": "greynoise-phantom-integration-v{0}".format(self._app_version)
        }
        return phantom.APP_SUCCESS

    def finalize(self):
        """Finalize the Phantom integration."""
        # Save the state, this data is saved across actions and
default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True) def _get_enable_remote_id(self): """ Getter method for enable_remote_id, mapped from YANG variable /relay_agent/dhcpv6/options/config/enable_remote_id (boolean) YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's Remote-ID suboption as specified in RFC 3046. The remote-id field may be used to encode a user name, remote IP address, interface/port identifier, etc. """ return self.__enable_remote_id def _set_enable_remote_id(self, v, load=False): """ Setter method for enable_remote_id, mapped from YANG variable /relay_agent/dhcpv6/options/config/enable_remote_id (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_enable_remote_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enable_remote_id() directly. YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's Remote-ID suboption as specified in RFC 3046. The remote-id field may be used to encode a user name, remote IP address, interface/port identifier, etc. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """enable_remote_id must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True)""", }) self.__enable_remote_id = t if hasattr(self, '_set'): self._set() def _unset_enable_remote_id(self): self.__enable_remote_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True) enable_interface_id = __builtin__.property(_get_enable_interface_id, _set_enable_interface_id) enable_remote_id = __builtin__.property(_get_enable_remote_id, _set_enable_remote_id) _pyangbind_elements = OrderedDict([('enable_interface_id', enable_interface_id), ('enable_remote_id', enable_remote_id), ]) class yc_state_openconfig_relay_agent__relay_agent_dhcpv6_options_state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcpv6/options/state. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Operational state data for DHCPv6 agent option on an interface """ __slots__ = ('_path_helper', '_extmethods', '__enable_interface_id','__enable_remote_id',) _yang_name = 'state' _yang_namespace = 'http://openconfig.net/yang/relay-agent' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__enable_interface_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False) self.__enable_remote_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return ['relay-agent', 'dhcpv6', 'options', 'state'] def _get_enable_interface_id(self): """ Getter method for enable_interface_id, mapped from YANG variable /relay_agent/dhcpv6/options/state/enable_interface_id (boolean) YANG Description: Enables DHCPv6 
OPTION_INTERFACE_ID (18) to identify the interface on which the client message was received. """ return self.__enable_interface_id def _set_enable_interface_id(self, v, load=False): """ Setter method for enable_interface_id, mapped from YANG variable /relay_agent/dhcpv6/options/state/enable_interface_id (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_enable_interface_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enable_interface_id() directly. YANG Description: Enables DHCPv6 OPTION_INTERFACE_ID (18) to identify the interface on which the client message was received. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """enable_interface_id must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)""", }) self.__enable_interface_id = t if hasattr(self, '_set'): self._set() def _unset_enable_interface_id(self): self.__enable_interface_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', 
defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False) def _get_enable_remote_id(self): """ Getter method for enable_remote_id, mapped from YANG variable /relay_agent/dhcpv6/options/state/enable_remote_id (boolean) YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's Remote-ID suboption as specified in RFC 3046. The remote-id field may be used to encode a user name, remote IP address, interface/port identifier, etc. """ return self.__enable_remote_id def _set_enable_remote_id(self, v, load=False): """ Setter method for enable_remote_id, mapped from YANG variable /relay_agent/dhcpv6/options/state/enable_remote_id (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_enable_remote_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enable_remote_id() directly. YANG Description: Sets DHCPv6 OPTION_REMOTE_ID (37). This option is the DHCPv6 equivalent for the IPv4 (DHCPv4) Relay Agent Option's Remote-ID suboption as specified in RFC 3046. The remote-id field may be used to encode a user name, remote IP address, interface/port identifier, etc. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """enable_remote_id must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)""", }) self.__enable_remote_id = t if hasattr(self, '_set'): self._set() def _unset_enable_remote_id(self): self.__enable_remote_id = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-remote-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False) enable_interface_id = __builtin__.property(_get_enable_interface_id) enable_remote_id = __builtin__.property(_get_enable_remote_id) _pyangbind_elements = OrderedDict([('enable_interface_id', enable_interface_id), ('enable_remote_id', enable_remote_id), ]) class yc_options_openconfig_relay_agent__relay_agent_dhcpv6_options(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcpv6/options. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Top-level container for DHCPv6 agent options on interfaces """ __slots__ = ('_path_helper', '_extmethods', '__config','__state',) _yang_name = 'options' _yang_namespace = 'http://openconfig.net/yang/relay-agent' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_options_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True) self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcpv6_options_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return ['relay-agent', 'dhcpv6', 'options'] def _get_config(self): """ Getter method for config, mapped from YANG variable /relay_agent/dhcpv6/options/config (container) YANG Description: Configuration data """ 
return self.__config def _set_config(self, v, load=False): """ Setter method for config, mapped from YANG variable /relay_agent/dhcpv6/options/config (container) If this variable is read-only (config: false) in the source YANG file, then _set_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_config() directly. YANG Description: Configuration data """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_options_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """config must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_options_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""", }) self.__config = t if hasattr(self, '_set'): self._set() def _unset_config(self): self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_options_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True) def _get_state(self): """ Getter method for state, mapped from YANG variable /relay_agent/dhcpv6/options/state 
(container) YANG Description: Operational state data for DHCPv6 agent option on an interface """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /relay_agent/dhcpv6/options/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: Operational state data for DHCPv6 agent option on an interface """ if hasattr(v, "_utype"): v = v._utype(v) try: t =
) << 24 oOO += int ( o0oOOoOOoO00O0oO [ 1 ] ) << 16 oOO += int ( o0oOOoOOoO00O0oO [ 2 ] ) << 8 oOO += int ( o0oOOoOOoO00O0oO [ 3 ] ) self . address = oOO elif ( self . is_ipv6 ( ) ) : if 81 - 81: Ii1I if 8 - 8: I1ii11iIi11i * I1IiiI * OOooOOo - I1Ii111 - iII111i if 67 - 67: oO0o if 76 - 76: I1IiiI % I1IiiI - IiII / OoOoOO00 / I1ii11iIi11i if 42 - 42: I1IiiI + I1ii11iIi11i + Oo0Ooo * i1IIi - II111iiii if 15 - 15: o0oOOo0O0Ooo if 60 - 60: I1ii11iIi11i / I1Ii111 if 13 - 13: I1Ii111 if 52 - 52: II111iiii / OoO0O00 . Ii1I if 68 - 68: iII111i if 67 - 67: I1IiiI * I1IiiI if 100 - 100: iII111i * iII111i . Oo0Ooo if 10 - 10: Oo0Ooo % ooOoO0o * Oo0Ooo if 48 - 48: ooOoO0o + II111iiii if 73 - 73: II111iiii if 63 - 63: i11iIiiIii . Oo0Ooo . OOooOOo - II111iiii if 35 - 35: II111iiii + IiII oO0Oo0oO00O = ( addr_str [ 2 : 4 ] == "::" ) try : addr_str = socket . inet_pton ( socket . AF_INET6 , addr_str ) except : addr_str = socket . inet_pton ( socket . AF_INET6 , "0::0" ) if 54 - 54: IiII - Oo0Ooo addr_str = binascii . hexlify ( addr_str ) if 55 - 55: I11i * OOooOOo * I1ii11iIi11i . i11iIiiIii if ( oO0Oo0oO00O ) : addr_str = addr_str [ 2 : 4 ] + addr_str [ 0 : 2 ] + addr_str [ 4 : : ] if 93 - 93: Oo0Ooo % i11iIiiIii / i11iIiiIii . II111iiii % I11i self . address = int ( addr_str , 16 ) if 13 - 13: O0 . i1IIi - OoooooooOO . oO0o elif ( self . is_geo_prefix ( ) ) : oO0o0oO0O = lisp_geo ( None ) oO0o0oO0O . name = "geo-prefix-{}" . format ( oO0o0oO0O ) oO0o0oO0O . parse_geo_string ( addr_str ) self . address = oO0o0oO0O elif ( self . is_mac ( ) ) : addr_str = addr_str . replace ( "-" , "" ) oOO = int ( addr_str , 16 ) self . address = oOO elif ( self . is_e164 ( ) ) : addr_str = addr_str [ 1 : : ] oOO = int ( addr_str , 16 ) self . address = oOO << 4 elif ( self . is_dist_name ( ) ) : self . address = addr_str . replace ( "'" , "" ) if 38 - 38: ooOoO0o . i1IIi / iII111i + I1IiiI - II111iiii self . mask_len = self . 
host_mask_len ( ) if 21 - 21: i11iIiiIii + II111iiii - i1IIi / OoooooooOO * OOooOOo % Oo0Ooo if 59 - 59: Ii1I def store_prefix ( self , prefix_str ) : if ( self . is_geo_string ( prefix_str ) ) : iI11I = prefix_str . find ( "]" ) iI1iiII1iii111 = len ( prefix_str [ iI11I + 1 : : ] ) * 8 elif ( prefix_str . find ( "/" ) != - 1 ) : prefix_str , iI1iiII1iii111 = prefix_str . split ( "/" ) else : iIi1IiI = prefix_str . find ( "'" ) if ( iIi1IiI == - 1 ) : return OoO00 = prefix_str . find ( "'" , iIi1IiI + 1 ) if ( OoO00 == - 1 ) : return iI1iiII1iii111 = len ( prefix_str [ iIi1IiI + 1 : OoO00 ] ) * 8 if 77 - 77: I1ii11iIi11i * Ii1I * O0 * I1IiiI % OoO0O00 - iIii1I11I1II1 if 6 - 6: i11iIiiIii . I11i - OoooooooOO self . string_to_afi ( prefix_str ) self . store_address ( prefix_str ) self . mask_len = int ( iI1iiII1iii111 ) if 26 - 26: I1IiiI if 26 - 26: IiII . Ii1I / IiII - OoO0O00 % OoO0O00 def zero_host_bits ( self ) : OoOo0Ooo0Oooo = ( 2 ** self . mask_len ) - 1 iiIiII1IiiI1 = self . addr_length ( ) * 8 - self . mask_len OoOo0Ooo0Oooo <<= iiIiII1IiiI1 self . address &= OoOo0Ooo0Oooo if 33 - 33: OoOoOO00 - I1IiiI + iII111i . iII111i if 68 - 68: OoO0O00 / OoO0O00 - I1IiiI + OoOoOO00 def is_geo_string ( self , addr_str ) : iI11I = addr_str . find ( "]" ) if ( iI11I != - 1 ) : addr_str = addr_str [ iI11I + 1 : : ] if 22 - 22: iIii1I11I1II1 . i1IIi . OOooOOo % Oo0Ooo - i1IIi oO0o0oO0O = addr_str . split ( "/" ) if ( len ( oO0o0oO0O ) == 2 ) : if ( oO0o0oO0O [ 1 ] . isdigit ( ) == False ) : return ( False ) if 78 - 78: I1IiiI / i1IIi % II111iiii % I1IiiI % Ii1I oO0o0oO0O = oO0o0oO0O [ 0 ] oO0o0oO0O = oO0o0oO0O . 
split ( "-" ) IiIIi1IIii = len ( oO0o0oO0O ) if ( IiIIi1IIii < 8 or IiIIi1IIii > 9 ) : return ( False ) if 86 - 86: I1Ii111 % ooOoO0o + OoOoOO00 + II111iiii % I1Ii111 + i11iIiiIii for OO00o0O0Oo in range ( 0 , IiIIi1IIii ) : if ( OO00o0O0Oo == 3 ) : if ( oO0o0oO0O [ OO00o0O0Oo ] in [ "N" , "S" ] ) : continue return ( False ) if 90 - 90: i11iIiiIii if ( OO00o0O0Oo == 7 ) : if ( oO0o0oO0O [ OO00o0O0Oo ] in [ "W" , "E" ] ) : continue return ( False ) if 92 - 92: i1IIi if ( oO0o0oO0O [ OO00o0O0Oo ] . isdigit ( ) == False ) : return ( False ) if 3 - 3: iIii1I11I1II1 . I1ii11iIi11i return ( True ) if 97 - 97: O0 if 82 - 82: OoooooooOO / I1Ii111 - ooOoO0o . I1Ii111 def string_to_afi ( self , addr_str ) : if ( addr_str . count ( "'" ) == 2 ) : self . afi = LISP_AFI_NAME return if 41 - 41: I11i . I11i if ( addr_str . find ( ":" ) != - 1 ) : self . afi = LISP_AFI_IPV6 elif ( addr_str . find ( "." ) != - 1 ) : self . afi = LISP_AFI_IPV4 elif ( addr_str . find ( "+" ) != - 1 ) : self . afi = LISP_AFI_E164 elif ( self . is_geo_string ( addr_str ) ) : self . afi = LISP_AFI_GEO_COORD elif ( addr_str . find ( "-" ) != - 1 ) : self . afi = LISP_AFI_MAC else : self . afi = LISP_AFI_NONE if 12 - 12: OoOoOO00 / I1IiiI if 4 - 4: Oo0Ooo * o0oOOo0O0Ooo def print_address ( self ) : o0o0O00 = self . print_address_no_iid ( ) o0OOoOO = "[" + str ( self . instance_id ) for Ii11 in self . iid_list : o0OOoOO += "," + str ( Ii11 ) o0OOoOO += "]" o0o0O00 = "{}{}" . format ( o0OOoOO , o0o0O00 ) return ( o0o0O00 ) if 45 - 45: Ii1I % OOooOOo * Ii1I - iIii1I11I1II1 if 18 - 18: I1Ii111 / Oo0Ooo % Ii1I + OoO0O00 def print_address_no_iid ( self ) : if ( self . is_ipv4 ( ) ) : o0o0O00 = self . address o0Ooo0OoOo = o0o0O00 >> 24 oOO0O = ( o0o0O00 >> 16 ) & 0xff II1o0OoO = ( o0o0O00 >> 8 ) & 0xff oo0o0O00O = o0o0O00 & 0xff return ( "{}.{}.{}.{}" . format ( o0Ooo0OoOo , oOO0O , II1o0OoO , oo0o0O00O ) ) elif ( self . is_ipv6 ( ) ) : I1iiIiiii1111 = lisp_hex_string ( self . address ) . zfill ( 32 )
<gh_stars>1-10
# NOTE(review): the "<gh_stars>" line above is scraper residue, not Python —
# remove it when restoring this file.
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for ginstall.py."""

__author__ = '<EMAIL> (<NAME>)'

# Python 2 file (StringIO module); tests rely on testdata/ fixtures and
# fake binaries on PATH.
import os
import shutil
import StringIO
import struct
import sys
import tempfile
import unittest

import ginstall


class FakeImgWManifest(object):
  # Minimal stand-in for an image object: exposes only the parsed manifest.

  def __init__(self, manifest):
    self.manifest = ginstall.ParseManifest(manifest)

  def ManifestVersion(self):
    return int(self.manifest['installer_version'])


class GinstallTest(unittest.TestCase):
  # Each test runs against a throwaway tmpdir; setUp redirects every path
  # ginstall touches (hnvram, /dev, mtd, sysfs) into that tmpdir and puts
  # fake tool binaries (testdata/bin) first on PATH.

  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
    self.hnvram_dir = self.tmpdir + '/hnvram'
    self.script_out = self.tmpdir + '/out'
    self.old_path = os.environ['PATH']
    # NOTE(review): old_bufsize is saved here but never restored in
    # tearDown, although tests mutate ginstall.BUFSIZE — confirm and restore.
    self.old_bufsize = ginstall.BUFSIZE
    self.old_files = ginstall.F
    os.environ['GINSTALL_HNVRAM_DIR'] = self.hnvram_dir
    os.environ['GINSTALL_OUT_FILE'] = self.script_out
    os.environ['GINSTALL_TEST_FAIL'] = ''
    os.environ['PATH'] = 'testdata/bin:' + self.old_path
    os.makedirs(self.hnvram_dir)
    os.makedirs(self.tmpdir + '/dev')
    ginstall.F['ETCPLATFORM'] = 'testdata/etc/platform'
    ginstall.F['DEV'] = self.tmpdir + '/dev'
    ginstall.F['MMCBLK'] = self.tmpdir + '/dev/mmcblk0'
    ginstall.F['MTD_PREFIX'] = self.tmpdir + '/dev/mtd'
    ginstall.F['PROC_MTD'] = 'testdata/proc/mtd.GFHD100'
    ginstall.F['SECUREBOOT'] = 'testdata/tmp/gpio/ledcontrol/secure_boot'
    ginstall.F['SGDISK'] = 'testdata/bin/sgdisk'
    ginstall.F['SIGNINGKEY'] = 'testdata/signing_key.der'
    ginstall.F['SYSBLOCK'] = self.tmpdir + '/sys/block'
    os.makedirs(ginstall.F['SYSBLOCK'])
    ginstall.F['SYSCLASSMTD'] = 'testdata/sys/class/mtd'
    # Pre-create fake mtd0..mtd9 device nodes.
    for i in range(0, 10):
      open(ginstall.F['MTD_PREFIX'] + str(i), 'w').write('1')
    os.mkdir(self.tmpdir + '/mmcblk0boot0')
    os.mkdir(self.tmpdir + '/mmcblk0boot1')
    ginstall.MMC_RO_LOCK['MMCBLK0BOOT0'] = (
        self.tmpdir + '/mmcblk0boot0/force_ro')
    ginstall.MMC_RO_LOCK['MMCBLK0BOOT1'] = (
        self.tmpdir + '/mmcblk0boot1/force_ro')
    ginstall.PROGRESS_EXPORT_PATH = os.path.join(self.tmpdir, 'ginstall')
    # default OS to 'fiberos'
    self.WriteOsFile('fiberos')

  def tearDown(self):
    os.environ['PATH'] = self.old_path
    shutil.rmtree(self.tmpdir, ignore_errors=True)
    ginstall.F = self.old_files

  def WriteVersionFile(self, version):
    """Create a fake /etc/version file in /tmp."""
    filename = self.tmpdir + '/version'
    open(filename, 'w').write(version)
    ginstall.F['ETCVERSION'] = filename

  def WriteOsFile(self, os_name):
    """Create a fake /etc/os file in /tmp."""
    filename = self.tmpdir + '/os'
    open(filename, 'w').write(os_name)
    ginstall.F['ETCOS'] = filename

  def WriteHnvramAttr(self, attr, val):
    # Seed a fake hnvram attribute (one file per attribute).
    filename = self.hnvram_dir + '/%s' % attr
    open(filename, 'w').write(val)

  def ReadHnvramAttr(self, attr):
    # Return the fake hnvram attribute value, or None if never written.
    filename = self.hnvram_dir + '/%s' % attr
    try:
      return open(filename).read()
    except IOError:
      return None

  def testVerify(self):
    self.assertTrue(ginstall.Verify(
        open('testdata/img/loader.bin'),
        open('testdata/img/loader.sig'),
        open('testdata/etc/google_public.der')))

  def testVerifyFailure(self):
    self.assertFalse(ginstall.Verify(
        open('testdata/img/loader.bin'),
        open('testdata/img/loader_bad.sig'),
        open('testdata/etc/google_public.der')))

  def testIsIdenticalAndProgressBar(self):
    ginstall.BUFSIZE = 1
    ginstall.ProgressBar.DOTSIZE = 1
    loader = 'testdata/img/loader.bin'
    loader1 = 'testdata/img/loader1.bin'
    self.assertTrue(ginstall.IsIdentical(
        'testloader', open(loader), open(loader)))
    self.assertFalse(ginstall.IsIdentical(
        'testloader', open(loader), open(loader1)))

    # Test exported progress bar.
    # With BUFSIZE/DOTSIZE of 1, each compared byte emits one dot.
    success_line = '.' * (len(open(loader).read()) - 1) + '\n'
    failure_line = ''
    for a, b in zip(open(loader).read(), open(loader1).read()):
      if a != b:
        break
      failure_line += '.'
    progress_file = open(
        os.path.join(ginstall.PROGRESS_EXPORT_PATH, 'progress')).read()
    self.assertEqual(progress_file, success_line + failure_line)

  def testVerifyAndIsIdentical(self):
    loader = open('testdata/img/loader.bin')
    self.assertTrue(ginstall.Verify(
        loader,
        open('testdata/img/loader.sig'),
        open('testdata/etc/google_public.der')))
    # Verify consumed the stream, so comparing without a rewind must raise.
    self.assertRaises(IOError, ginstall.IsIdentical,
                      'loader', loader, open('testdata/img/loader.bin'))
    loader.seek(0)
    self.assertTrue(ginstall.IsIdentical(
        'loader', loader, open('testdata/img/loader.bin')))
    loader.seek(0)
    self.assertFalse(ginstall.IsIdentical(
        'loader', loader, open('testdata/img/loader1.bin')))

  def testIsMtdNand(self):
    # Backed by testdata/sys/class/mtd entries: mtd6 is not NAND, mtd7 is.
    mtd = ginstall.F['MTD_PREFIX']
    self.assertFalse(ginstall.IsMtdNand(mtd + '6'))
    self.assertTrue(ginstall.IsMtdNand(mtd + '7'))

  def testInstallToMtdNandFails(self):
    # A nanddump that writes incorrect data to stdout
    ginstall.NANDDUMP = 'testdata/bin/nanddump.wrong'
    in_f = StringIO.StringIO('Testing123')
    mtdfile = self.tmpdir + '/mtd'
    self.assertRaises(IOError, ginstall.InstallToMtd, in_f, mtdfile)

  def testWriteMtd(self):
    origfile = open('testdata/random', 'r')
    origsize = os.fstat(origfile.fileno())[6]

    ginstall.BUFSIZE = 1024

    mtd = ginstall.F['MTD_PREFIX']
    open(mtd + '4', 'w').close()
    writesize = ginstall.InstallToMtd(origfile, mtd + '4')
    self.assertEqual(writesize, origsize)

    # check that data was written to MTDBLOCK
    origfile.seek(0, os.SEEK_SET)
    self.assertEqual(origfile.read(), open(mtd + '4').read())

  def testWriteMtdEraseException(self):
    origfile = open('testdata/random', 'r')
    self.assertRaises(IOError, ginstall.InstallToMtd, origfile, '/dev/mtd0')

  def testWriteMtdVerifyException(self):
    origfile = open('testdata/random', 'r')
    ginstall.MTDBLOCK = '/dev/zero'
    # verify should fail, destfile will read back zero.
    self.assertRaises(IOError, ginstall.InstallToMtd, origfile, '/dev/mtd4')

  def testWriteUbiException(self):
    # Force the fake ubiformat on PATH to fail.
    os.environ['GINSTALL_TEST_FAIL'] = 'fail'
    os.system('ubiformat')
    origfile = open('testdata/random', 'r')
    self.assertRaises(IOError, ginstall.InstallRawFileToUbi,
                      origfile, 'mtd0.tmp')

  def testSetBootPartition(self):
    # Exercises all four (current OS) x (target OS) combinations and checks
    # both the resulting fake-hnvram state and the exact command history.
    self.WriteOsFile('fiberos')
    ginstall.SetBootPartition('fiberos', 0)
    self.assertEqual('kernel0', self.ReadHnvramAttr('ACTIVATED_KERNEL_NAME'))
    ginstall.SetBootPartition('fiberos', 1)
    self.assertEqual('kernel1', self.ReadHnvramAttr('ACTIVATED_KERNEL_NAME'))
    ginstall.SetBootPartition('android', 0)
    self.assertEqual('a', self.ReadHnvramAttr('ANDROID_ACTIVE_PARTITION'))
    self.assertEqual('android', self.ReadHnvramAttr('BOOT_TARGET'))
    ginstall.SetBootPartition('android', 1)
    self.assertEqual('b', self.ReadHnvramAttr('ANDROID_ACTIVE_PARTITION'))
    self.assertEqual('android', self.ReadHnvramAttr('BOOT_TARGET'))
    self.WriteOsFile('android')
    ginstall.SetBootPartition('fiberos', 0)
    self.assertEqual('kernel0', self.ReadHnvramAttr('ACTIVATED_KERNEL_NAME'))
    self.assertEqual('fiberos', self.ReadHnvramAttr('BOOT_TARGET'))
    ginstall.SetBootPartition('fiberos', 1)
    self.assertEqual('kernel1', self.ReadHnvramAttr('ACTIVATED_KERNEL_NAME'))
    self.assertEqual('fiberos', self.ReadHnvramAttr('BOOT_TARGET'))
    ginstall.SetBootPartition('android', 0)
    self.assertEqual('a', self.ReadHnvramAttr('ANDROID_ACTIVE_PARTITION'))
    ginstall.SetBootPartition('android', 1)
    self.assertEqual('b', self.ReadHnvramAttr('ANDROID_ACTIVE_PARTITION'))

    # also verify the hnvram command history for good measures
    out = open(self.script_out).read().splitlines()
    self.assertEqual(out[0], 'hnvram -q -w ACTIVATED_KERNEL_NAME=kernel0')
    self.assertEqual(out[1], 'hnvram -q -w ACTIVATED_KERNEL_NAME=kernel1')
    self.assertEqual(out[2], 'hnvram -q -w ANDROID_ACTIVE_PARTITION=a')
    self.assertEqual(out[3], 'hnvram -q -w BOOT_TARGET=android')
    self.assertEqual(out[4], 'hnvram -q -w ANDROID_ACTIVE_PARTITION=b')
    self.assertEqual(out[5], 'hnvram -q -w BOOT_TARGET=android')
    self.assertEqual(out[6], 'hnvram -q -w ACTIVATED_KERNEL_NAME=kernel0')
    self.assertEqual(out[7], 'hnvram -q -w BOOT_TARGET=fiberos')
    self.assertEqual(out[8], 'hnvram -q -w ACTIVATED_KERNEL_NAME=kernel1')
    self.assertEqual(out[9], 'hnvram -q -w BOOT_TARGET=fiberos')
    self.assertEqual(out[10], 'hnvram -q -w ANDROID_ACTIVE_PARTITION=a')
    self.assertEqual(out[11], 'hnvram -q -w ANDROID_ACTIVE_PARTITION=b')

  def testParseManifest(self):
    # Bracketed platform lists parse to a Python list...
    l = ('installer_version: 99\nimage_type: fake\n'
         'platforms: [ GFHD100, GFMS100 ]\n')
    in_f = StringIO.StringIO(l)
    actual = ginstall.ParseManifest(in_f)
    expected = {'installer_version': '99',
                'image_type': 'fake',
                'platforms': ['GFHD100', 'GFMS100']}
    self.assertEqual(actual, expected)
    # ...while a bare platform stays a plain string.
    l = 'installer_version: 99\nimage_type: fake\nplatforms: GFHD007\n'
    in_f = StringIO.StringIO(l)
    actual = ginstall.ParseManifest(in_f)
    expected = {'installer_version': '99',
                'image_type': 'fake',
                'platforms': 'GFHD007'}
    self.assertEqual(actual, expected)

  def testGetKey(self):
    key = ginstall.GetKey()
    self.assertEqual(key, 'This is a signing key.\n')

  def testPlatformRoutines(self):
    self.assertEqual(ginstall.GetPlatform(), 'GFUNITTEST')
    in_f = StringIO.StringIO('platforms: [ GFUNITTEST, GFFOOBAR ]\n')
    manifest = ginstall.ParseManifest(in_f)
    self.assertTrue(ginstall.CheckPlatform(manifest))

  def testGetOs(self):
    self.WriteOsFile('fiberos')
    self.assertEqual('fiberos', ginstall.GetOs())
    self.WriteOsFile('android')
    self.assertEqual('android', ginstall.GetOs())
    # in case file doesn't exist, default is 'fiberos'
    os.remove(self.tmpdir + '/os')
    self.assertEqual('fiberos', ginstall.GetOs())

  def testGetMtdPrefix(self):
    self.WriteOsFile('fiberos')
    self.assertEqual(ginstall.F['MTD_PREFIX'], ginstall.GetMtdPrefix())
    self.WriteOsFile('android')
    self.assertEqual(ginstall.F['MTD_PREFIX-ANDROID'], ginstall.GetMtdPrefix())
    # unknown OS returns 'fiberos'
    self.WriteOsFile('windows')
    self.assertEqual(ginstall.F['MTD_PREFIX'], ginstall.GetMtdPrefix())

  def testGetMmcblk0Prefix(self):
    self.WriteOsFile('fiberos')
    self.assertEqual(ginstall.F['MMCBLK0'], ginstall.GetMmcblk0Prefix())
    self.WriteOsFile('android')
    self.assertEqual(ginstall.F['MMCBLK0-ANDROID'], ginstall.GetMmcblk0Prefix())
    # unknown OS returns 'fiberos'
    self.WriteOsFile('windows')
    self.assertEqual(ginstall.F['MMCBLK0'], ginstall.GetMmcblk0Prefix())

  def testGetInternalHarddisk(self):
    # A device symlink containing 'usb' is external; others are internal.
    self.assertEqual(ginstall.GetInternalHarddisk(), None)
    os.mkdir(ginstall.F['SYSBLOCK'] + '/sda')
    os.symlink(ginstall.F['SYSBLOCK'] + '/sda/usb_disk',
               ginstall.F['SYSBLOCK'] + '/sda/device')
    os.mkdir(ginstall.F['SYSBLOCK'] + '/sdc')
    os.symlink(ginstall.F['SYSBLOCK'] + '/sdc/sata_disk',
               ginstall.F['SYSBLOCK'] + '/sdc/device')
    expected = ginstall.F['DEV'] + '/sdc'
    self.assertEqual(ginstall.GetInternalHarddisk(), expected)
    os.mkdir(ginstall.F['SYSBLOCK'] + '/sdb')
    expected = ginstall.F['DEV'] + '/sdb'
    self.assertEqual(ginstall.GetInternalHarddisk(), expected)

  def MakeImgWManifestVersion(self, version):
    # Build a fake image whose manifest declares the given installer version.
    in_f = StringIO.StringIO('installer_version: %s\n' % version)
    return FakeImgWManifest(in_f)

  def testCheckManifestVersion(self):
    manifest = {}
    for v in ['2', '3', '4']:
      manifest['installer_version'] = v
      self.assertTrue(ginstall.CheckManifestVersion(manifest))
    for v in ['1', '5']:
      manifest['installer_version'] = v
      self.assertRaises(ginstall.Fatal, ginstall.CheckManifestVersion,
                        manifest)
    for v in ['3junk']:
      manifest['installer_version'] = v
      self.assertRaises(ValueError, ginstall.CheckManifestVersion, manifest)

  def MakeManifestWMinimumVersion(self, version):
    # Build a parsed manifest declaring only a minimum_version.
    in_f = StringIO.StringIO('minimum_version: %s\n' % version)
    return ginstall.ParseManifest(in_f)

  def testCheckMinimumVersion(self):
    self.WriteVersionFile('gftv200-38.10')
    for v in [
        'gftv200-38.5',
        'gftv200-38-pre2-58-g72b3037-da',
        'gftv200-38-pre2']:
      manifest = self.MakeManifestWMinimumVersion(v)
      self.assertTrue(ginstall.CheckMinimumVersion(manifest))
    for v in [
        'gftv200-39-pre0-58-g72b3037-da',
        'gftv200-39-pre0',
        'gftv200-39-pre1-58-g72b3037-da',
        'gftv200-39-pre1',
        'gftv200-38.11',
    ]:
      manifest = self.MakeManifestWMinimumVersion(v)
      self.assertRaises(ginstall.Fatal, ginstall.CheckMinimumVersion, manifest)
    manifest = self.MakeManifestWMinimumVersion('junk')
    self.assertRaises(ginstall.Fatal, ginstall.CheckMinimumVersion, manifest)

  def testCheckMisc(self):
    ginstall.F['ETCPLATFORM'] = 'testdata/etc/platform.GFHD200'
    for v in [
        'gftv200-38.11',
        'gftv200-39-pre2-58-g72b3037-da',
        'gftv200-39-pre2']:
      manifest = {'version': v}
      ginstall.CheckMisc(manifest)  # checking that it does not raise exception
    for v in [
        'gftv200-39-pre0-58-g72b3037-da',
        'gftv200-39-pre0',
        'gftv200-39-pre1-58-g72b3037-da',
        'gftv200-39-pre1',
        'gftv200-38.9',
        'gftv200-38.10'
    ]:
      manifest = {'version': v}
      self.assertRaises(ginstall.Fatal, ginstall.CheckMisc, manifest)

  def MakeManifestWithFilenameSha1s(self, filename):
    # Build a parsed manifest, optionally containing a "<file>-sha1" entry.
    m = ('installer_version: 4\n'
         'image_type: unlocked\n'
         'version: gftv254-48-pre2-1100-g25ff8d0-ck\n'
         'platforms: [ GFHD254 ]\n')
    if filename is not None:
      m += '%s-sha1: 9b5236c282b8c11b38a630361b6c690d6aaa50cb\n' % filename
    in_f = StringIO.StringIO(m)
    return ginstall.ParseManifest(in_f)

  def testGetOsFromManifest(self):
    # android specific image names return 'android'
    for img in ginstall.ANDROID_IMAGES:
      manifest = self.MakeManifestWithFilenameSha1s(img)
      self.assertEqual('android', ginstall.GetOsFromManifest(manifest))
    # fiberos image names or anything non-android returns 'fiberos'
    for img in ['rootfs.img', 'kernel.img', 'whatever.img']:
      manifest = self.MakeManifestWithFilenameSha1s(img)
      self.assertEqual('fiberos', ginstall.GetOsFromManifest(manifest))
    # no sha1 entry in the manifest returns 'fiberos'
    manifest = self.MakeManifestWithFilenameSha1s(None)
    self.assertEqual('fiberos', ginstall.GetOsFromManifest(manifest))

  def testGetBootedPartition(self):
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.none'
    self.assertEqual(None, ginstall.GetBootedPartition())
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.0'
    self.assertEqual(0, ginstall.GetBootedPartition())
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.1'
    self.assertEqual(1, ginstall.GetBootedPartition())
    # Android
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.android.none'
    self.assertEqual(None, ginstall.GetBootedPartition())
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.android.0'
    self.assertEqual(0, ginstall.GetBootedPartition())
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.android.1'
    self.assertEqual(1, ginstall.GetBootedPartition())
    # Prowl
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.prowl.none'
    self.assertEqual(ginstall.GetBootedPartition(), None)
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.prowl.0'
    self.assertEqual(ginstall.GetBootedPartition(), 0)
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.prowl.1'
    self.assertEqual(ginstall.GetBootedPartition(), 1)

  def testGetActivePartitionFromHNVRAM(self):
    # FiberOS looks at ACTIVATED_KERNEL_NAME, not ANDROID_ACTIVE_PARTITION
    # 0
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '0')
    self.assertEqual(0, ginstall.GetActivePartitionFromHNVRAM('fiberos'))
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', '0')
    self.assertEqual(0, ginstall.GetActivePartitionFromHNVRAM('fiberos'))
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', '1')
    self.assertEqual(0, ginstall.GetActivePartitionFromHNVRAM('fiberos'))
    # 1
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '1')
    self.assertEqual(1, ginstall.GetActivePartitionFromHNVRAM('fiberos'))
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', '0')
    self.assertEqual(1, ginstall.GetActivePartitionFromHNVRAM('fiberos'))
    # Android looks at ANDROID_ACTIVE_PARTITION, not ACTIVATED_KERNEL_NAME
    # 0
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', '0')
    self.assertEqual(0, ginstall.GetActivePartitionFromHNVRAM('android'))
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '0')
    self.assertEqual(0, ginstall.GetActivePartitionFromHNVRAM('android'))
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '1')
    self.assertEqual(0, ginstall.GetActivePartitionFromHNVRAM('android'))
    # 1
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', '1')
    self.assertEqual(1, ginstall.GetActivePartitionFromHNVRAM('android'))
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '0')
    self.assertEqual(1, ginstall.GetActivePartitionFromHNVRAM('android'))

  # NOTE(review): name starts with a capital 'T', so unittest never discovers
  # or runs this test — confirm and rename to testGetPartition. (The method
  # is truncated at the end of this chunk, so it is reproduced as-is.)
  def TestGetPartition(self):
    self.assertEqual(0, ginstall.GetPartition('primary', 'fiberos'))
    self.assertEqual(0, ginstall.GetPartition(0, 'fiberos'))
    self.assertEqual(1, ginstall.GetPartition('secondary', 'fiberos'))
    self.assertEqual(1, ginstall.GetPartition(1, 'fiberos'))
    self.assertEqual(0, ginstall.GetPartition('primary', 'android'))
    self.assertEqual(0, ginstall.GetPartition(0, 'android'))
    self.assertEqual(1, ginstall.GetPartition('secondary', 'android'))
    self.assertEqual(1, ginstall.GetPartition(1, 'android'))
    # other: FiberOS->FiberOS
    self.WriteOsFile('fiberos')
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.none'
    self.assertEqual(1, ginstall.GetPartition('other', 'fiberos'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.0'
    self.assertEqual(1, ginstall.GetPartition('other', 'fiberos'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.1'
    self.assertEqual(0, ginstall.GetPartition('other', 'fiberos'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.prowl.none'
    self.assertEqual(1, ginstall.GetPartition('other', 'fiberos'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.prowl.0'
    self.assertEqual(1, ginstall.GetPartition('other', 'fiberos'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.prowl.1'
    self.assertEqual(0, ginstall.GetPartition('other', 'fiberos'))
    # other: FiberOS->Android
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', 'a')
    self.assertEqual(1, ginstall.GetPartition('other', 'android'))
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', 'b')
    self.assertEqual(0, ginstall.GetPartition('other', 'android'))
    self.WriteHnvramAttr('ANDROID_ACTIVE_PARTITION', 'bla')
    self.assertEqual(1, ginstall.GetPartition('other', 'android'))
    # other: Android->FiberOS
    self.WriteOsFile('android')
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '0')
    self.assertEqual(1, ginstall.GetPartition('other', 'fiberos'))
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', '1')
    self.assertEqual(0, ginstall.GetPartition('other', 'fiberos'))
    self.WriteHnvramAttr('ACTIVATED_KERNEL_NAME', 'bla')
    self.assertEqual(1, ginstall.GetPartition('other', 'fiberos'))
    # other: Android->Android
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.android.none'
    self.assertEqual(1, ginstall.GetPartition('other', 'android'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.android.0'
    self.assertEqual(1, ginstall.GetPartition('other', 'android'))
    ginstall.F['PROC_CMDLINE'] = 'testdata/proc/cmdline.android.1'
    self.assertEqual(0, ginstall.GetPartition('other', 'android'))
    # Test prowl
#! /usr/bin/env python3 # Copyright(c) 2019, Intel Corporation # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of Intel Corporation nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from opae.diag.eth_group import eth_group
from .common import exception_quit, FpgaFinder, COMMON, hexint
from .fpgastats import FPGASTATS
from .fpgalpbk import FPGALPBK
import argparse
import mmap
import sys
import struct
import glob
import os
import time

# PCI resource files are mapped one page at a time; MAPMASK splits an
# absolute address into (page base, in-page offset).
MAPSIZE = mmap.PAGESIZE
MAPMASK = MAPSIZE - 1

# Device Feature Header (DFH) field layout: type in bits [63:60], id in
# bits [11:0].
DFH_TYPE_SHIFT = 60
DFH_TYPE_MASK = 0xf
DFH_TYPE_AFU = 0x1
DFH_TYPE_FIU = 0x4
DFH_ID_SHIFT = 0
DFH_ID_MASK = 0xfff
DFH_ID_UPL = 0x1f

# UUID identifying the UPL feature (low/high 64-bit halves), and register
# offsets inside a DFH used to read it and to walk the feature list.
UPL_UUID_L = 0xA013D76F19C4D8D1
UPL_UUID_H = 0xFA00A55CCA8C4C4B
UUID_L_OFFSET_REG = 0x08
UUID_H_OFFSET_REG = 0x10
NEXT_AFU_OFFSET_REG = 0x18
NEXT_AFU_OFFSET_MASK = 0xffffff

# Port-configuration modes encoded in bits [35:32] of bitstream_id.
VC_MODE_8x10G = 0
VC_MODE_2x1x25G = 2
VC_MODE_2x2x25G = 4
VC_MODE_NAME = {0: '8x10G', 1: '4x25G', 2: '2x1x25G', 3: '6x25G',
                4: '2x2x25G'}

# Device-relative register indices for indirect register access.
INDIRECT_CTRL_REG = 0x3
INDIRECT_DATA_REG = 0x4

# Bounds and defaults for generated test packets.
MIN_TEST_PKT_NUM = 1
MAX_TEST_PKT_NUM = 100000000
DEFAULT_TEST_PKT_NUM = 100000
MIN_TEST_PKT_LEN = 46
MAX_TEST_PKT_LEN = 1500
DEFAULT_TEST_PKT_LEN = 128

# Module-level cache filled in by get_sbdf_mode_mapping() /
# get_sbdf_upl_mapping(); keys: 'mode', 'total_mac', 'demux_offset',
# 'upl_base'.
VC_INFO = {}


def pci_read(pci_dev_path, addr):
    """Read a 64-bit little-endian value from a PCI resource file.

    Maps the page containing `addr` read-only and unpacks 8 bytes at the
    in-page offset.
    """
    base = addr & ~MAPMASK
    offset = addr & MAPMASK
    # NOTE(review): dead store — `data` is unconditionally overwritten by the
    # mmap slice below before it is ever read.
    data = b'\xff'*8
    with open(pci_dev_path, "rb", 0) as f:
        mm = mmap.mmap(f.fileno(), MAPSIZE, mmap.MAP_SHARED,
                       mmap.PROT_READ, 0, base)
        # read data (64 bit)
        data = mm[offset:offset+8]
        value, = struct.unpack('<Q', data)
        # close mapping
        mm.close()
    return value


def pci_write(pci_dev_path, addr, value):
    """Write a 64-bit little-endian value into a PCI resource file."""
    base = addr & ~MAPMASK
    offset = addr & MAPMASK
    data = struct.pack('<Q', value)
    # mmap PCI resource
    with open(pci_dev_path, "r+b", 0) as f:
        mm = mmap.mmap(f.fileno(), MAPSIZE, mmap.MAP_SHARED,
                       mmap.PROT_WRITE, 0, base)
        # write data (64 bit)
        mm[offset:offset+8] = data
        # close mapping
        mm.close()


# args: 0 - sbdf, 1 - device offset, 2 - addr, 3 - data, 4 - write check
def rw_data(*args):
    """Read (3 args) or write (5 args) a register in the UPL window.

    Requires VC_INFO['upl_base'] to have been discovered first; exits fatally
    otherwise.  The absolute address is upl_base | device<<12 | reg<<3.
    """
    upl_base = VC_INFO.get('upl_base', None)
    if upl_base is None:
        exception_quit("Error: UPL not found in FPGA {}".format(args[0]), 6)
    pci_dev_path = '/sys/bus/pci/devices/{}/resource2'.format(args[0])
    addr = upl_base | (args[1] << 12) | (args[2] << 3)
    if len(args) == 3:      # read
        return pci_read(pci_dev_path, addr)
    elif len(args) == 5:    # write
        pci_write(pci_dev_path, addr, args[3])
    else:
        exception_quit("Error: Bad arguments number", 7)


# args: 0 - sbdf, 1 - device_offset, 2 - addr, 3 - data, 4 - write check
def indir_rw_data(*args):
    """Indirect register read/write via INDIRECT_CTRL/DATA registers.

    Read (3 args): issue a read command, poll the data register until the
    valid bit (bit 32) is set, return the low 32 data bits.
    Write (5 args): issue a write command, poll for completion; if args[4]
    is truthy, read the register back and exit fatally on a mismatch.
    """
    addr = ((args[1] << 17) | args[2]) << 32
    rdata = 0
    if len(args) == 3:      # read
        cmd = 0x4 << 60
        rw_data(args[0], 0x0, INDIRECT_CTRL_REG, cmd | addr, 0)
        while (rdata >> 32) != 0x1:     # waiting for read valid
            rdata = rw_data(args[0], 0x0, INDIRECT_DATA_REG)
        # rdata valid
        return rdata & 0xffffffff
    elif len(args) == 5:    # write
        cmd = 0x8 << 60
        rw_data(args[0], 0x0, INDIRECT_CTRL_REG, cmd | addr | args[3], 0)
        while (rdata >> 32) != 0x1:     # waiting for write complete
            rdata = rw_data(args[0], 0x0, INDIRECT_DATA_REG)
        # rdata valid
        rdata = 0
        if args[4]:
            # Optional write verification: read back and compare.
            cmd = 0x4 << 60
            rw_data(args[0], 0x0, INDIRECT_CTRL_REG, cmd | addr, 0)
            while (rdata >> 32) != 0x1:     # waiting for read valid
                rdata = rw_data(args[0], 0x0, INDIRECT_DATA_REG)
            # rdata valid
            if args[3] != (rdata & 0xffffffff):
                print('{:#x} {:#x}'.format(args[3], rdata))
                exception_quit("Error: failed comparison of wrote data", 8)
    else:
        exception_quit("Error: Bad arguments number", 7)


def get_sbdf_mode_mapping(sbdf, args):
    """Decode bitstream_id from sysfs into VC_INFO mode/mac/offset fields.

    Exits fatally if the bitstream does not advertise bypass-mode support or
    reports an unsupported port mode.  Also fills args.ports from args.port
    and prints the bitstream seed.
    """
    global VC_INFO
    sysfs_path = glob.glob(os.path.join('/sys/bus/pci/devices', sbdf,
                                        'fpga_region', 'region*',
                                        'dfl-fme*', 'bitstream_id'))
    if len(sysfs_path) == 0:
        exception_quit("Error: bitstream_id not found", 4)
    with open(sysfs_path[0], 'r') as f:
        bitstream_id = f.read().strip()
    # build flags live in bits [31:24]; bit 0 of them = bypass support.
    build_flags = (int(bitstream_id, 16) >> 24) & 0xff
    if (build_flags & 0x01) == 0x00:
        exception_quit("FPGA {} does not support bypass mode".format(sbdf), 5)
    VC_INFO['mode'] = (int(bitstream_id, 16) >> 32) & 0xf
    print('Mode: {}'.format(VC_MODE_NAME.get(VC_INFO['mode'], 'unknown')))
    if VC_INFO['mode'] == VC_MODE_8x10G:
        VC_INFO['total_mac'] = 8
        VC_INFO['demux_offset'] = 0x100
    elif VC_INFO['mode'] == VC_MODE_2x1x25G:
        VC_INFO['total_mac'] = 2
        VC_INFO['demux_offset'] = 0x40
    elif VC_INFO['mode'] == VC_MODE_2x2x25G:
        VC_INFO['total_mac'] = 4
        VC_INFO['demux_offset'] = 0x80
    else:
        exception_quit("FPGA {} not support bypass mode".format(sbdf), 5)
    c = COMMON()
    args.ports = c.get_port_list(args.port, VC_INFO.get('total_mac'))
    sysfs_path = glob.glob(os.path.join('/sys/bus/pci/devices', sbdf,
                                        'fpga_region', 'region*',
                                        'dfl-fme*', 'bitstream_metadata'))
    if len(sysfs_path) == 0:
        # NOTE(review): message says "bitstream_id" but this branch is about
        # bitstream_metadata — misleading error text, flagged only.
        exception_quit("Error: bitstream_id not found", 4)
    with open(sysfs_path[0], 'r') as f:
        bitstream_md = int(f.read().strip(), 16)
    seed = (bitstream_md >> 4) & 0xfff
    print("Seed: {:#x}".format(seed))


def get_sbdf_upl_mapping(sbdf):
    """Walk the DFH feature list in BAR2 to locate the UPL feature.

    Stores the feature's base address in VC_INFO['upl_base']; exits fatally
    if the UPL UUID is not found or the feature list ends.
    """
    global VC_INFO
    pci_dev_path = '/sys/bus/pci/devices/{}/resource2'.format(sbdf)
    addr = 0
    while True:
        header = pci_read(pci_dev_path, addr)
        feature_type = (header >> DFH_TYPE_SHIFT) & DFH_TYPE_MASK
        feature_id = (header >> DFH_ID_SHIFT) & DFH_ID_MASK
        if feature_type == DFH_TYPE_AFU and feature_id == DFH_ID_UPL:
            uuid_l = pci_read(pci_dev_path, addr+UUID_L_OFFSET_REG)
            uuid_h = pci_read(pci_dev_path, addr+UUID_H_OFFSET_REG)
            if uuid_l == UPL_UUID_L and uuid_h == UPL_UUID_H:
                VC_INFO['upl_base'] = addr
                break
            else:
                msg = "FPGA {} has no packet generator for test".format(sbdf)
                exception_quit(msg, 6)
        if feature_type in [DFH_TYPE_AFU, DFH_TYPE_FIU]:
            next_afu_offset = pci_read(pci_dev_path, addr+NEXT_AFU_OFFSET_REG)
            next_afu_offset &= NEXT_AFU_OFFSET_MASK
            if next_afu_offset == 0:
                # Feature chain exhausted without finding UPL.
                exception_quit(
                    "Error: UPL not found in FPGA {}".format(sbdf), 6)
            else:
                addr += next_afu_offset
                next_afu_offset = 0


def clear_stats(f, info, args):
    """Clear MAC and demux statistics counters when --clear was requested."""
    global VC_INFO
    if args.clear:
        print('Clearing statistics of MACs ...')
        vc_mode = VC_INFO.get('mode', None)
        if vc_mode is None:
            exception_quit("FPGA is not in bypass mode", 5)
        offset = VC_INFO.get('demux_offset', 0x100)
        for w in info:
            _, mac_total, _ = info[w]
        # NOTE(review): `args` is used as a mapping here (`args.items()`),
        # while elsewhere it is an argparse-style namespace (args.clear,
        # args.ports) — an argparse.Namespace has no .items(); confirm the
        # caller's actual type before touching this loop.
        for keys, values in args.items():
            eth_group_inst = eth_group()
            ret = eth_group_inst.eth_group_open(values[0])
            if ret != 0:
                return None
            for i in args.ports:
                # Register numbers differ between 10G and 25G MACs.
                if vc_mode == VC_MODE_8x10G:
                    eth_group_inst.eth_group_reg_write(eth_group_inst,
                                                       'mac', i, 0x140, 0x1)
                    eth_group_inst.eth_group_reg_write(eth_group_inst,
                                                       'mac', i, 0x1C0, 0x1)
                else:
                    eth_group_inst.eth_group_reg_write(eth_group_inst,
                                                       'mac', i, 0x845, 0x1)
                    eth_group_inst.eth_group_reg_write(eth_group_inst,
                                                       'mac', i, 0x945, 0x1)
                # Per-port demux counters: tx at `reg`, rx at offset+reg.
                reg = 0x1 + i * 8
                eth_group_inst.eth_group_reg_write(eth_group_inst,
                                                   'eth', 0, reg, 0x0)
                eth_group_inst.eth_group_reg_write(eth_group_inst,
                                                   'eth', 0, offset + reg, 0x0)
            time.sleep(0.1)
            eth_group_inst.eth_group_close()


def enable_loopback(args):
    """Enable local serial line-side loopback if --loopback was given."""
    if args.loopback:
        args.direction = 'local'
        args.side = 'line'
        args.type = 'serial'
        args.en = 1
        fl = FPGALPBK(args)
        fl.eth_group_start()
        if args.debug:
            print('Loopback enabled')
        time.sleep(0.1)


def disable_loopback(args):
    """Disable the loopback configured by enable_loopback()."""
    if args.loopback:
        args.direction = 'local'
        args.side = 'line'
        args.type = 'serial'
        args.en = 0
        fl = FPGALPBK(args)
        fl.eth_group_start()
        if args.debug:
            print('Loopback disabled')
        time.sleep(0.1)


def test_wait(sbdf, timeout, args):
    """Poll the monitor-done register (0x100F) until set or timeout expires."""
    left_time = timeout
    while left_time > 0:
        sleep_time = 0.1 if left_time > 0.1 else left_time
        time.sleep(sleep_time)
        left_time -= sleep_time
        if indir_rw_data(sbdf, 0, 0x100F) == 1:
            if args.debug:
                period = timeout - left_time
                rate = (args.number * args.length * 8) / period / 1e9
                print('Time consuming {}s, data rate {:.2f}Gbps'.format(
                    period, rate))
            break


# NOTE(review): this function is truncated by the chunk boundary below — the
# remainder of its body is not visible here; do not treat it as complete.
def fvl_bypass_mode_test(sbdf, args):
    """Run the packet generator/monitor bypass test (body truncated here)."""
    global VC_INFO
    get_sbdf_upl_mapping(sbdf)
    fs = FPGASTATS(args)
    info = fs.eth_group_info(fs.eth_grps)
    clear_stats(fs, info, args)
    indir_rw_data(sbdf, 0, 0x1011, 0xff, 0)  # clear monitor statistic
    indir_rw_data(sbdf, 0, 0x1000, 0xAAAAAAAA, 0)  #
    indir_rw_data(sbdf, 0, 0x1001, 0xAAAAAAAA, 0)  # DST MAC to generator
    indir_rw_data(sbdf, 0, 0x1002, 0xBBBBBBBB, 0)  #
    indir_rw_data(sbdf, 0, 0x1003, 0xBBBBBBBB, 0)  # SRC MAC to generator
    indir_rw_data(sbdf, 0, 0x1009, 0xAAAAAAAA, 0)  #
    indir_rw_data(sbdf, 0, 0x100A, 0xAAAAAAAA, 0)  # DST MAC to monitor
    indir_rw_data(sbdf, 0, 0x100B, 0xBBBBBBBB, 0)  #
    indir_rw_data(sbdf, 0, 0x100C, 0xBBBBBBBB, 0)  # SRC MAC to monitor
    indir_rw_data(sbdf, 0, 0x1005, args.length, 0)  # set GEN_PKT_LENGTH
    indir_rw_data(sbdf, 0, 0x1006, 0x1, 0)  # set EN_PKT_DELAY
    time.sleep(0.1)
    mac_total = VC_INFO.get('total_mac', 2)
    if 'all' in args.port:
        pkt_num = mac_total * args.number  # packets num to be sent on all mac
        wait_time = pkt_num * args.length / 1e9
        if args.debug:
            print('Timeout is set to {}s'.format(wait_time))
        print('Sending packets to all ports ...')
        indir_rw_data(sbdf, 0, 0x1004, pkt_num, 0)  # set GEN_PKT_NUMBER
        indir_rw_data(sbdf, 0, 0x100D, pkt_num, 0)  # set MON_PKT_NUMBER
        indir_rw_data(sbdf, 0, 0x100E, 0x5, 0)  # set MON_PKT_CTRL
        indir_rw_data(sbdf, 0,
"{", "]": "}", ";": ":", "'": "@", ",": "<", ".": ">", "/": "?", "\\": "|"} # Just gets the keys from the shifts dictionary # tuple of numbers 0 - 9 shift_keys = shifts.keys() # Static variable that holds the total number of text boxes in the program # Used in checking which text box is in focus # A text box must be in focus in order to register keyboard input TextBoxes = 0 # blocked_chars = the characters the box will not accept # char_limit = the maximum number of characters allowed in the textbox def __init__(self, x, y, width, height, font, blocked_chars, char_limit = None): Element.__init__(self, x, y, width, height, font) self.blocked_chars = blocked_chars self.charLimit = char_limit # text is a string that holds the characters input to the text box self.text = "" # txt_obj is a text object used for rendering the input self.txt_obj = font.render(self.text, 1, black) self.update_text() # Adds one to the count of text boxes Textbox.TextBoxes += 1 # By default is not in focus self.is_focused = False # If it is the only text box, automatically in focus if Textbox.TextBoxes < 2: self.is_focused = True # Boolean value of whether the shift key is held down self.shift_pressed = False # If the user clicks on the text box, it is in focus # If the user hasn't clicked on the text box, defocuses it to prevent multiple text boxes in focus at one time def on_click(self, mouse_x, mouse_y): if self.rect.collidepoint(mouse_x, mouse_y): self.is_focused = True else: self.is_focused = False # Called every time a key is pressed def on_char_typed(self, key_pressed): # Only runs the code if the textbox is in focus if self.is_focused: # Special case for backspace, removes the last letter of the string if key_pressed == pygame.K_BACKSPACE: self.text = self.text[:-1] # Checks if shift key is pressed elif key_pressed == pygame.K_LSHIFT or key_pressed == pygame.K_RSHIFT: self.shift_pressed = True # Checks the character limit has not been reached elif len(self.text) < self.charLimit: # 
is_allowed is True by default, if the key input is a blocked char, it become false is_allowed = True # is_special is False by default # If key input is a special character (from special_chars list) then it is True is_special = False # key_name gets the name of the corresponding pygame key code # for alphanumeric characters, is the same as the character itself key_name = pygame.key.name(key_pressed) # Checking if character is blocked for c in self.blocked_chars: if key_name == c: is_allowed = False break # Checking if character is special for s in Textbox.special_chars: if key_name == s: is_special = True break # If the character is allowed and isn't a special character, it can be added normally if is_allowed and not is_special: # If the shift key is being held down if self.shift_pressed: # is_shift_key is False by default but becomes true if # the key being pressed is the shift_keys list is_shift_key = False # Checks through the shift_keys list for the key being pressed for k in Textbox.shift_keys: if key_name == k: is_shift_key = True # If the key is in shift_key, use that key's shift equivalent # Add it to the text string if is_shift_key: self.text = self.text + Textbox.shifts[key_name] # If not in shift_keys, just add the uppercase equivalent of the letter typed else: self.text = self.text + key_name.upper() # If shift key not pressed, just add the character typed to text else: self.text = self.text + key_name # If it's an allowed character and is a special character # Special cases are dealt with here elif is_allowed and is_special: # Key name of the space bar is not ' ', must be manually checked for if key_pressed == pygame.K_SPACE: self.text = self.text + " " # Updates the text object being drawn to the screen self.update_text() # Called every time a key is released def on_key_up(self, key_up): # Checks if shift is unpressed if key_up == pygame.K_RSHIFT or key_up == pygame.K_LSHIFT: self.shift_pressed = False def draw(self, screen): # Draws white background 
box pygame.draw.rect(screen, white, (self.x, self.y, self.width, self.height)) # Draws outline if focused if self.is_focused: pygame.draw.lines(screen, black, True, ((self.x, self.y), (self.x + self.width, self.y), (self.x + self.width, self.y + self.height), (self.x, self.y + self.height)), 2) # Draws text if not empty if self.text != "": screen.blit(self.txt_obj, (self.x + 2, self.y + 2)) # Called when the text in the text box is updated, recreates the text object def update_text(self): self.txt_obj = self.font.render(self.text, 1, black) @property def text(self): return self._text # Method for externally setting the text in the text box # changes text to the parameter new_text then updates the text object @text.setter def text(self, new_text): self._text = new_text self.update_text() # Colour patch draws a rectangle of a specific colour on the screen # Useful for showing output of RGB selectors # Inherits all methods and attributes from Element class ColourPatch(Element): # rgb - A tuple of 3 integers between 0 and 255 to represent a 24 bit colour def __init__(self, x, y, width, height, font, rgb): Element.__init__(self, x, y, width, height, font) self.rgb = rgb @property def rgb(self): return self._rgb # Setter method acts as validation to make sure an RGB colour does not contain numbers outside of 0-255 @rgb.setter def rgb(self, new_rgb): valid = True for num in new_rgb: if num > 255 or num < 0: valid = False if valid: self._rgb = new_rgb else: print("RGB colour must be between 0 and 255") def draw(self, screen): # Draws the colour pygame.draw.rect(screen, self.rgb, (self.x, self.y, self.width, self.height)) # Draws a border pygame.draw.lines(screen, black, True, ((self.x, self.y), (self.x2, self.y), (self.x2, self.y2), (self.x, self.y2))) # A Group is a list of Elements that can be addressed all at once # Inherits all methods and attributes from Element # Uses a list to contain all elements contained within it # Rather than calling the update method of every 
element in the list, # you can just call the update method of the group, which calls them all # as all Element objects have an update method class Group(Element): def __init__(self, x, y, width, height, font): Element.__init__(self, x, y, width, height, font) # visible - whether to draw the elements or not self.visible = False # The list of elements # Any object that inherits from the Element class can be added # The elements in the list are associated with the Menu object but are not deleted if the menu is deleted self.elements = [] # texts is a two-dimensional list that stores pygame text objects # and the co-ords where each object should be drawn self.texts = [[], []] # Adds an element (any object that inherits the Element class) to the group def add(self, element): try: # Adds the x and y co-ords of the group to the element's co-ord element.x += self.x element.y += self.y element.x2 += self.x element.y2 += self.y element.rect.move_ip(self.x, self.y) # Run method that allows objects to do things specific to them when added element.on_menu_add() # Add object to elements list self.elements.append(element) except AttributeError: print("Error: Tried adding a non-element object to a group") # Adds text to render in the group, takes 2 parameters # text - the text to be added, in string form def add_text(self, text, coords): self.texts[0].append(self.font.render(text, 1, (255, 0, 0))) self.texts[1].append((coords[0] + self.x, coords[1] + self.y)) def draw(self, screen): if self.visible: # Draws each element in the group by calling its draw method for element in self.elements: element.draw(screen) # Draws each text object in the group to the screen for i in range(len(self.texts[0])): screen.blit(self.texts[0][i], self.texts[1][i]) def on_click(self, mouse_x, mouse_y): # Runs each element's on_click method for element in self.elements: element.on_click(mouse_x, mouse_y) def on_unclick(self): # Runs each element's on_unclick method for element in self.elements: 
element.on_unclick() def on_char_typed(self, key_pressed): # Runs each
been created") self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): """Get decayed learning rate as a Tensor with dtype=var_dtype.""" lr_t = self._get_hyper("learning_rate", var_dtype) if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step = math_ops.cast(self.iterations, var_dtype) lr_t = math_ops.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.: local_step = math_ops.cast(self.iterations, var_dtype) decay_t = self._get_hyper("decay", var_dtype) lr_t = lr_t / (1. + decay_t * local_step) return lr_t @abc.abstractmethod def get_config(self): """Returns the config of the optimimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary. """ config = {"name": self._name} if hasattr(self, "clipnorm"): config["clipnorm"] = self.clipnorm if hasattr(self, "clipvalue"): config["clipvalue"] = self.clipvalue return config @classmethod def from_config(cls, config, custom_objects=None): """Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Arguments: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance. 
""" if "lr" in config: config["learning_rate"] = config.pop("lr") if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects) return cls(**config) def _serialize_hyperparameter(self, hyperparameter_name): """Serialize a hyperparameter that can be a float, callable, or Tensor.""" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value) if callable(value): return value() if tensor_util.is_tensor(value): return backend.get_value(value) return value def variables(self): """Returns variables of this Optimizer based on the order created.""" return self._weights @property def weights(self): """Returns variables of this Optimizer based on the order created.""" return self._weights def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer. def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "You called `set_weights(weights)` on optimizer " + self._name + " with a weight list of length " + str(len(weights)) + ", but the optimizer was expecting " + str(len(params)) + " weights. 
Provided weights: " + str(weights)[:50] + "...") if not params: return weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError("Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight(self, name, shape, dtype=None, initializer="zeros", trainable=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE): if dtype is None: dtype = dtypes.float32 if isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) if synchronization == tf_variables.VariableSynchronization.ON_READ: if trainable: raise ValueError( "Synchronization value can be set to " "VariableSynchronization.ON_READ only for non-trainable variables. " "You have specified trainable=True and " "synchronization=VariableSynchronization.ON_READ.") else: # Set trainable to be false when variable is to be synced on read. trainable = False elif trainable is None: trainable = True variable = self._add_variable_with_custom_getter( name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization, aggregation=aggregation) backend.track_variable(variable) return variable def _assert_valid_dtypes(self, tensors): """Asserts tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type. """ valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise ValueError("Invalid type %r for %s, expected: %s." 
% (dtype, t.name, [v for v in valid_dtypes])) def _valid_dtypes(self): """Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients. """ return set( [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64]) def _call_if_callable(self, param): """Call the function if param is callable.""" return param() if callable(param) else param def _resource_apply_dense(self, grad, handle): """Add ops to apply dense gradients to the variable `handle`. Args: grad: a `Tensor` representing the gradient. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices): """Add ops to apply sparse gradients to `handle`, with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing `grad` and `indices` and passing them on to `_resource_apply_sparse`. Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. Returns: An `Operation` which updates the value of the variable. 
""" summed_grad, unique_indices = _deduplicate_indexed_slices( values=grad, indices=indices) return self._resource_apply_sparse(summed_grad, handle, unique_indices) def _resource_apply_sparse(self, grad, handle, indices): """Add ops to apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices are unique. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError() def _resource_scatter_add(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() def _resource_scatter_update(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_update(x.handle, i, v)]): return x.value() # --------------- # For implementing the trackable interface # --------------- def _restore_slot_variable(self, slot_name, variable, slot_variable): """Restore a newly created slot variable's value.""" variable_key = _var_key(variable) deferred_restorations = self._deferred_slot_restorations.get( slot_name, {}).pop(variable_key, []) # Iterate over restores, highest restore UID first to minimize the number # of assignments. deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True) for checkpoint_position in deferred_restorations: checkpoint_position.restore(slot_variable) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable): """Restore a slot variable's value, possibly creating it. 
Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but `variable` has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be restored. slot_name: The name of this `Optimizer`'s slot to restore into. variable: The variable object this slot is being created for. """ variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if (slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable creation if there is an active variable creator # scope. Generally we'd like to eagerly create/restore slot variables # when possible, but this may mean that scopes intended to catch # `variable` also catch its eagerly created slot variable # unintentionally (specifically make_template would add a dependency on # a slot variable if not for this case). Deferring is mostly harmless # (aside from double initialization), and makes variable creator scopes # behave the same way they do when graph building. 
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access initializer = trackable.CheckpointInitialValue( checkpoint_position=slot_variable_position) slot_variable = self.add_slot( var=variable, initializer=initializer, slot_name=slot_name) # Slot variables are not owned by any one object (because we don't want to # save the slot variable if the optimizer is saved without the non-slot # variable, or if the non-slot variable is saved without the optimizer; # it's a dependency hypergraph with edges of the form (optimizer, non-slot # variable, variable)). So we don't _track_ slot variables anywhere, and # instead special-case this dependency and otherwise pretend it's a normal # graph. if slot_variable is not None: #
servicebus topic create --resource-group myresourcegroup --namespace-name mynamespace --name mytopic """ helps['servicebus topic update'] = """ type: command short-summary: Updates the Service Bus Topic examples: - name: Updates existing Service Bus Topic. text: az servicebus topic update --resource-group myresourcegroup --namespace-name mynamespace --name mytopic --enable-ordering True """ helps['servicebus topic show'] = """ type: command short-summary: Shows the Service Bus Topic Details examples: - name: Shows the Topic details. text: az servicebus topic show --resource-group myresourcegroup --namespace-name mynamespace --name mytopic """ helps['servicebus topic list'] = """ type: command short-summary: List the Topic by Service Bus Namepsace examples: - name: Get the Topics by Namespace. text: az servicebus topic list --resource-group myresourcegroup --namespace-name mynamespace """ helps['servicebus topic delete'] = """ type: command short-summary: Deletes the Service Bus Topic examples: - name: Deletes the Service Bus Topic text: az servicebus topic delete --resource-group myresourcegroup --namespace-name mynamespace --name mytopic """ helps['servicebus topic authorization-rule create'] = """ type: command short-summary: Create Authorization Rule for given Service Bus Topic examples: - name: Create Authorization Rule for given Service Bus Topic text: az servicebus topic authorization-rule create --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name myauthorule --rights Send Listen """ helps['servicebus topic authorization-rule update'] = """ type: command short-summary: Create Authorization Rule for given Service Bus Topic examples: - name: Create Authorization Rule for given Service Bus Topic text: az servicebus topic authorization-rule update --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name myauthorule --rights Send """ helps['servicebus topic authorization-rule show'] = """ type: 
command short-summary: Shows the details of Authorization Rule for given Service Bus Topic examples: - name: Shows the details of Authorization Rule for given Service Bus Topic text: az servicebus topic authorization-rule show --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name myauthorule """ helps['servicebus topic authorization-rule list'] = """ type: command short-summary: shows list of Authorization Rule by Service Bus Topic examples: - name: shows list of Authorization Rule by Service Bus Topic text: az servicebus topic authorization-rule list --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic """ helps['servicebus topic authorization-rule keys list'] = """ type: command short-summary: List the keys and connection strings of Authorization Rule for Service Bus Topic. examples: - name: List the keys and connection strings of Authorization Rule for Service Bus Topic. text: az servicebus topic authorization-rule keys list --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name myauthorule """ helps['servicebus topic authorization-rule keys renew'] = """ type: command short-summary: Regenerate keys of Authorization Rule for Service Bus Topic. examples: - name: Regenerate key of Service Bus Topic. text: az servicebus topic authorization-rule keys renew --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name myauthorule --key PrimaryKey """ helps['servicebus topic authorization-rule delete'] = """ type: command short-summary: Deletes the Authorization Rule of the given Service Bus Topic. examples: - name: Deletes the Authorization Rule of Service Bus Topic. 
text: az servicebus topic authorization-rule delete --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name myauthorule """ helps['servicebus topic subscription create'] = """ type: command short-summary: Create the ServiceBus Subscription examples: - name: Create a new Subscription. text: az servicebus topic subscription create --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name mysubscription """ helps['servicebus topic subscription update'] = """ type: command short-summary: Updates the ServiceBus Subscription examples: - name: Update a new Subscription. text: az servicebus topic subscription update --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name mysubscription --lock-duration PT3M """ helps['servicebus topic subscription show'] = """ type: command short-summary: Shows Service Bus Subscription Details examples: - name: Shows the Subscription details. text: az servicebus topic subscription show --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name mysubscription """ helps['servicebus topic subscription list'] = """ type: command short-summary: List the Subscription by Service Bus Topic examples: - name: Shows the Subscription by Service Bus Topic. text: az servicebus topic subscription list --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic """ helps['servicebus topic subscription delete'] = """ type: command short-summary: Deletes the Service Bus Subscription examples: - name: Deletes the Subscription text: az servicebus topic subscription delete --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --name mysubscription """ helps['servicebus topic subscription rule create'] = """ type: command short-summary: Create the ServiceBus Rule for Subscription examples: - name: Create Rule. 
text: az servicebus topic subscription rule create --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --subscription-name mysubscription --name myrule --filter-sql-expression myproperty=myvalue """ helps['servicebus topic subscription rule update'] = """ type: command short-summary: Updates the ServiceBus Rule for Subscription examples: - name: Updates Rule. text: az servicebus topic subscription rule update --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --subscription-name mysubscription --name myrule --filter-sql-expression myproperty=myupdatedvalue """ helps['servicebus topic subscription rule show'] = """ type: command short-summary: Shows ServiceBus Rule Details examples: - name: Shows the ServiceBus Rule details. text: az servicebus topic subscription rule show --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --subscription-name mysubscription --name myrule """ helps['servicebus topic subscription rule list'] = """ type: command short-summary: List the ServiceBus Rule by Subscription examples: - name: Shows the Rule ServiceBus by Subscription. 
text: az servicebus topic subscription rule list --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --subscription-name mysubscription """ helps['servicebus topic subscription rule delete'] = """ type: command short-summary: Deletes the ServiceBus Rule examples: - name: Deletes the ServiceBus Rule text: az servicebus topic subscription rule delete --resource-group myresourcegroup --namespace-name mynamespace --topic-name mytopic --subscription-name mysubscription --name myrule """ helps['servicebus georecovery-alias exists'] = """ type: command short-summary: Check if Geo Recovery Alias Name is available examples: - name: Check availability of the Geo-Disaster Recovery Configuration Alias Name text: az servicebus georecovery-alias exists --resource-group myresourcegroup --namespace-name primarynamespace --alias myaliasname """ helps['servicebus georecovery-alias set'] = """ type: command short-summary: Sets Service Bus Geo-Disaster Recovery Configuration Alias for the give Namespace examples: - name: Sets Geo Disaster Recovery configuration - Alias for the give Namespace text: az servicebus georecovery-alias set --resource-group myresourcegroup --namespace-name primarynamespace --alias myaliasname --partner-namespace armresourceid """ helps['servicebus georecovery-alias show'] = """ type: command short-summary: shows properties of Service Bus Geo-Disaster Recovery Configuration Alias for Primay/Secondary Namespace examples: - name: show properties Geo-Disaster Recovery Configuration Alias of the Primary Namespace text: az servicebus georecovery-alias show --resource-group myresourcegroup --namespace-name primarynamespace --alias myaliasname - name: Get details of Alias (Geo DR Configuration) of the Secondary Namespace text: az servicebus georecovery-alias show --resource-group myresourcegroup --namespace-name secondarynamespace --alias myaliasname """ helps['servicebus georecovery-alias authorization-rule list'] = """ type: command 
short-summary: Shows the list of Authorization Rule by Service Bus Namespace examples: - name: Shows the list of Authorization Rule by Service Bus Namespace text: az servicebus georecovery-alias authorization-rule list --resource-group myresourcegroup --namespace-name mynamespace --alias myaliasname """ helps['servicebus georecovery-alias authorization-rule keys list'] = """ type: command short-summary: List the keys and connection strings of Authorization Rule for the Service Bus Namespace examples: - name: List the keys and connection strings of Authorization Rule for the namespace. text: az servicebus georecovery-alias authorization-rule keys list --resource-group myresourcegroup --namespace-name mynamespace --name myauthorule --alias myaliasname """ helps['servicebus georecovery-alias break-pair'] = """ type: command short-summary: Disables Service Bus Geo-Disaster Recovery Configuration Alias and stops replicating changes from primary to secondary namespaces examples: - name: Disables the Disaster Recovery and stops replicating changes from primary to secondary namespaces text: az servicebus georecovery-alias break-pair --resource-group myresourcegroup --namespace-name primarynamespace --alias myaliasname """ helps['servicebus georecovery-alias fail-over'] = """ type: command short-summary: Invokes Service Bus Geo-Disaster Recovery Configuration Alias failover and re-configure the alias to point to the secondary namespace examples: - name: Invokes Geo-Disaster Recovery Configuration Alias failover and reconfigure the alias to point to the secondary namespace text: az servicebus georecovery-alias fail-over --resource-group myresourcegroup --namespace-name secondarynamespace --alias myaliasname """ helps['servicebus georecovery-alias delete'] = """ type: command short-summary: Deletes Service Bus Geo-Disaster Recovery Configuration Alias request accepted examples: - name: Delete Service Bus Geo-Disaster Recovery Configuration Alias request accepted text: az 
servicebus georecovery-alias delete --resource-group myresourcegroup --namespace-name secondarynamespace --alias myaliasname """ helps['servicebus migration start'] = """ type: command short-summary: Create and Start Service Bus Migration of Standard to Premium namespace. long-summary: Service Bus Migration requires an empty Premium namespace to replicate entities from Standard namespace. examples: - name: Create and Start Service Bus Migration of Standard to Premium namespace text: az servicebus migration start --resource-group myresourcegroup --name standardnamespace --target-namespace ARMIDpremiumnamespace --post-migration-name mypostmigrationname """ helps['servicebus migration show'] = """ type: command short-summary: shows properties of properties of Service Bus
# -*- coding: utf-8 -*- """ This package implements several community detection. Originally based on community aka python-louvain library from <NAME> (https://github.com/taynaud/python-louvain) """ from __future__ import print_function import array import random from math import exp, log, sqrt from collections import defaultdict from collections import Counter import networkx as nx from .community_status import Status __author__ = """<NAME> (<EMAIL>)""" __author__ = """<NAME> (<EMAIL>)""" # Copyright (C) 2018 by # <NAME> (<EMAIL>> # <NAME> (<EMAIL>) # All rights reserved. # BSD license. __PASS_MAX = -1 __MIN = 0.0000001 def partition_at_level(dendrogram, level): """Return the partition of the nodes at the given level A dendrogram is a tree and each level is a partition of the graph nodes. Level 0 is the first partition, which contains the smallest communities, and the best is len(dendrogram) - 1. The higher the level is, the bigger are the communities Parameters ---------- dendrogram : list of dict a list of partitions, ie dictionnaries where keys of the i+1 are the values of the i. 
level : int the level which belongs to [0..len(dendrogram)-1] Returns ------- partition : dictionnary A dictionary where keys are the nodes and the values are the set it belongs to Raises ------ KeyError If the dendrogram is not well formed or the level is too high See Also -------- best_partition which directly combines partition_at_level and generate_dendrogram to obtain the partition of highest modularity Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> dendrogram = generate_dendrogram(G) >>> for level in range(len(dendrogram) - 1) : >>> print("partition at level", level, "is", partition_at_level(dendrogram, level)) # NOQA """ partition = dendrogram[0].copy() for index in range(1, level + 1): for node, community in partition.items(): partition[node] = dendrogram[index][community] return partition def modularity(partition, graph, weight='weight',gamma = 1.): """Compute the modularity of a partition of a graph Parameters ---------- partition : dict the partition of the nodes, i.e a dictionary where keys are their nodes and values the communities graph : networkx.Graph the networkx graph which is decomposed weight : str, optional the key in graph to use as weight. Default to 'weight' gamma : float a granularity parameter for the modularity Returns ------- modularity : float The modularity Raises ------ KeyError If the partition is not a partition of all graph nodes ValueError If the graph has no link TypeError If graph is not a networkx.Graph References ---------- .. 1. Newman, M.E.J. & <NAME>. Finding and evaluating community structure in networks. Physical Review E 69, 26113(2004). 
Examples -------- >>> G=nx.erdos_renyi_graph(100, 0.01) >>> part = best_partition(G, model = 'ppm') >>> modularity(part, G) """ if graph.is_directed(): raise TypeError("Bad graph type, use only non directed graph") inc = dict([]) deg = dict([]) links = graph.size(weight=weight) if links == 0: raise ValueError("A graph without link has an undefined modularity") for node in graph: com = partition[node] deg[com] = deg.get(com, 0.) + graph.degree(node, weight=weight) for neighbor, datas in graph[node].items(): edge_weight = datas.get(weight, 1) if partition[neighbor] == com: if neighbor == node: inc[com] = inc.get(com, 0.) + float(edge_weight) else: inc[com] = inc.get(com, 0.) + float(edge_weight) / 2. res = 0. for com in set(partition.values()): res += (inc.get(com, 0.) / links) - \ gamma * (deg.get(com, 0.) / (2. * links)) ** 2 return res def best_partition(graph, model=None, partition=None, weight='weight', resolution=1., randomize=False, pars = None): assert model in ('dcppm','ppm','ilfr','ilfrs'), "Unknown model specified" """Compute the partition of the graph nodes which maximises the modularity (or try..) using the Louvain heuristices This is the partition of highest modularity, i.e. the highest partition of the dendrogram generated by the Louvain algorithm. Parameters ---------- graph : networkx.Graph the networkx graph which is decomposed model : string should be 'ilfr', 'ilfrs', 'ppm' or 'dcppm' partition : dict, optional the algorithm will start using this partition of the nodes. It's a dictionary where keys are their nodes and values the communities weight : str, optional the key in graph to use as weight. Default to 'weight' resolution : double, optional Will change the size of the communities, default to 1. 
        represents the time described in "Laplacian Dynamics and Multiscale
        Modular Structure in Networks", <NAME>, <NAME>, <NAME>
    randomize : boolean, optional
        Will randomize the node evaluation order and the community evaluation
        order to get different partitions at each call
    pars : dict, optional
        the dict with 'mu' or 'gamma' key and a float value. Use 'mu' within
        (0,1) for 'ilfr' and 'ilfrs' models and 'gamma' within (0,inf) for
        'ppm' and 'dcppm' models. Also, for 'ppm' model it's possible to use
        two optional parameters, fixedPin and fixedPout, they could be used
        to modify the gamma calculation.

    Returns
    -------
    partition : dictionary
       The partition, with communities numbered from 0 to number of
       communities

    Raises
    ------
    TypeError
       If the graph is not a networkx.Graph or is a directed graph

    See Also
    --------
    generate_dendrogram to obtain all the decompositions levels

    Notes
    -----
    Uses Louvain algorithm

    References
    ----------
    .. 1. Blondel, V.D. et al. Fast unfolding of communities in large
       networks. J. Stat. Mech 10008, 1-12(2008).

    Examples
    --------
    >>> #Basic usage
    >>> G=nx.erdos_renyi_graph(100, 0.01)
    >>> part = best_partition(G, model='ppm')
    >>> #other example to display a graph with its community :
    >>> #better with karate_graph() as defined in networkx examples
    >>> #erdos renyi don't have true community structure
    >>> G = nx.erdos_renyi_graph(30, 0.05)
    >>> #first compute the best partition
    >>> partition = best_partition(G, model='ppm')
    >>> #drawing
    >>> size = float(len(set(partition.values())))
    >>> pos = nx.spring_layout(G)
    >>> count = 0.
    >>> for com in set(partition.values()) :
    >>>     count += 1.
>>> list_nodes = [nodes for nodes in partition.keys() >>> if partition[nodes] == com] >>> nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20, node_color = str(count / size)) >>> nx.draw_networkx_edges(G, pos, alpha=0.5) >>> plt.show() """ dendo = generate_dendrogram(graph, model, partition, weight, resolution, randomize, pars=pars) return partition_at_level(dendo, len(dendo) - 1) def generate_dendrogram(graph, model=None, part_init=None, weight='weight', resolution=1., randomize=False, pars = None): """Find communities in the graph and return the associated dendrogram A dendrogram is a tree and each level is a partition of the graph nodes. Level 0 is the first partition, which contains the smallest communities, and the best is len(dendrogram) - 1. The higher the level is, the bigger are the communities Parameters ---------- graph : networkx.Graph the networkx graph which will be decomposed model : string should be 'ilfr', 'ilfrs', 'ppm' or 'dcppm' part_init : dict, optional the algorithm will start using this partition of the nodes. It's a dictionary where keys are their nodes and values the communities weight : str, optional the key in graph to use as weight. Default to 'weight' resolution : double, optional Will change the size of the communities, default to 1. represents the time described in "Laplacian Dynamics and Multiscale Modular Structure in Networks", <NAME>, <NAME>, <NAME> pars : dict, optional the dict with 'mu' or 'gamma' key and a float value. Use 'mu' within (0,1) for 'ilfr' and 'ilfrs' models and 'gamma' within (0,inf) for 'ppm' and 'dcppm' models. Also, for 'ppm' model it's possible to use two optional parameters, fixedPin and fixedPout, they could be used to modify the gamma calculation. Returns ------- dendrogram : list of dictionaries a list of partitions, ie dictionnaries where keys of the i+1 are the values of the i. 
        and where keys of the first are the nodes of graph

    Raises
    ------
    TypeError
        If the graph is not a networkx.Graph

    See Also
    --------
    best_partition

    Notes
    -----
    Uses Louvain algorithm

    References
    ----------
    .. 1. Blondel, V.D. et al. Fast unfolding of communities in large
       networks. J. Stat. Mech 10008, 1-12(2008).

    Examples
    --------
    >>> G=nx.erdos_renyi_graph(100, 0.01)
    >>> dendo = generate_dendrogram(G, model='ppm')
    >>> for level in range(len(dendo) - 1) :
    >>>     print("partition at level", level,
    >>>           "is", partition_at_level(dendo, level))
    :param weight:
    :type weight:
    """
    if graph.is_directed():
        raise TypeError("Bad graph type, use only non directed graph")

    # special case, when there is no link
    # the best partition is everyone in its community
    if graph.number_of_edges() == 0:
        part = dict([])
        for node in graph.nodes():
            part[node] = node
        return [part]

    # Working copy of the graph; the Louvain passes coarsen it in place.
    current_graph = graph.copy()
    status = Status()
    status.init(current_graph, weight, part_init)
<reponame>theGreenJedi/BciPy import glob import itertools import logging import random from os import path, sep from typing import Iterator import numpy as np import sounddevice as sd import soundfile as sf from PIL import Image from psychopy import core # Prevents pillow from filling the console with debug info logging.getLogger("PIL").setLevel(logging.WARNING) def best_selection(selection_elements: list, val: list, len_query: int, always_included=None) -> list: """Best Selection. given set of elements and a value function over the set, picks the len_query number of elements with the best value. Args: selection_elements(list[str]): the set of elements val(list[float]): values for the corresponding elements len_query(int): number of elements to be picked from the set always_included(list[str]): subset of elements that should always be included in the result. Defaults to None. Return: best_selection(list[str]): elements from selection_elements with the best values """ always_included = always_included or [] n = len_query # pick the top n items sorted by value in decreasing order elem_val = dict(zip(selection_elements, val)) best = sorted(selection_elements, key=elem_val.get, reverse=True)[0:n] replacements = [ item for item in always_included if item not in best and item in selection_elements ][0:n] if replacements: best[-len(replacements):] = replacements return best def best_case_rsvp_seq_gen(alp: list, session_stimuli: list, timing=[1, 0.2], color=['red', 'white'], stim_number=1, stim_length=10, is_txt=True, seq_constants=None) -> tuple: """Best Case RSVP Sequence Generation. generates RSVPKeyboard sequence by picking n-most likely letters. Args: alp(list[str]): alphabet (can be arbitrary) session_stimuli(ndarray[float]): quantifier metric for query selection dim(session_stimuli) = card(alp)! 
timing(list[float]): Task specific timing for generator color(list[str]): Task specific color for generator First element is the target, second element is the fixation Observe that [-1] element represents the trial information stim_number(int): number of random stimuli to be created stim_length(int): number of trials in a sequence seq_constants(list[str]): list of letters that should always be included in every sequence. If provided, must be alp items. Return: schedule_seq(tuple( samples[list[list[str]]]: list of sequences timing(list[list[float]]): list of timings color(list(list[str])): list of colors)): scheduled sequences """ if len(alp) != len(session_stimuli): raise Exception('Missing information about alphabet. len(alp):{}, ' 'len(session_stimuli):{}, should be same!'.format( len(alp), len(session_stimuli))) if seq_constants and not set(seq_constants).issubset(alp): raise Exception('Sequence constants must be alphabet items.') # create a list of alphabet letters alphabet = [i for i in alp] # query for the best selection query = best_selection(alphabet, session_stimuli, stim_length, seq_constants) # shuffle the returned values random.shuffle(query) # Init some lists to construct our stimuli with samples, times, colors = [], [], [] for idx_num in range(stim_number): # append a fixation cross. if not text, append path to image fixation if is_txt: sample = ['+'] else: sample = ['bcipy/static/images/bci_main_images/PLUS.png'] # construct the sample from the query sample += [i for i in query] samples.append(sample) # append timing times.append([timing[i] for i in range(len(timing) - 1)] + [timing[-1]] * stim_length) # append colors colors.append([color[i] for i in range(len(color) - 1)] + [color[-1]] * stim_length) return (samples, times, colors) def random_rsvp_calibration_seq_gen(alp, timing=[0.5, 1, 0.2], color=['green', 'red', 'white'], stim_number=10, stim_length=10, is_txt=True): """Random RSVP Calibration Sequence Generator. 
Generates random RSVPKeyboard sequences. Args: alp(list[str]): alphabet (can be arbitrary) timing(list[float]): Task specific timing for generator color(list[str]): Task specific color for generator First element is the target, second element is the fixation Observe that [-1] element represents the trial information stim_number(int): number of random stimuli to be created stim_length(int): number of trials in a sequence Return: schedule_seq(tuple( samples[list[list[str]]]: list of sequences timing(list[list[float]]): list of timings color(list(list[str])): list of colors)): scheduled sequences """ len_alp = len(alp) samples, times, colors = [], [], [] for idx_num in range(stim_number): idx = np.random.permutation(np.array(list(range(len_alp)))) rand_smp = (idx[0:stim_length]) if not is_txt: sample = [ alp[rand_smp[0]], 'bcipy/static/images/bci_main_images/PLUS.png'] else: sample = [alp[rand_smp[0]], '+'] rand_smp = np.random.permutation(rand_smp) sample += [alp[i] for i in rand_smp] samples.append(sample) times.append([timing[i] for i in range(len(timing) - 1)] + [timing[-1]] * stim_length) colors.append([color[i] for i in range(len(color) - 1)] + [color[-1]] * stim_length) schedule_seq = (samples, times, colors) return schedule_seq def target_rsvp_sequence_generator(alp, target_letter, parameters, timing=[0.5, 1, 0.2], color=['green', 'white', 'white'], stim_length=10, is_txt=True): """Target RSVP Sequence Generator. Generate target RSVPKeyboard sequences. 
Args: alp(list[str]): alphabet (can be arbitrary) target_letter([str]): letter to be copied timing(list[float]): Task specific timing for generator color(list[str]): Task specific color for generator First element is the target, second element is the fixation Observe that [-1] element represents the trial information stim_length(int): number of trials in a sequence Return: schedule_seq(tuple( samples[list[list[str]]]: list of sequences timing(list[list[float]]): list of timings color(list(list[str])): list of colors)): scheduled sequences """ len_alp = len(alp) # intialize our arrays samples, times, colors = [], [], [] rand_smp = random.sample(range(len_alp), stim_length) if is_txt: sample = ['+'] else: sample = ['bcipy/static/images/bci_main_images/PLUS.png'] target_letter = parameters['path_to_presentation_images'] + \ target_letter + '.png' sample += [alp[i] for i in rand_smp] # if the target isn't in the array, replace it with some random index that # is not fixation if target_letter not in sample: random_index = np.random.randint(0, stim_length - 1) sample[random_index + 1] = target_letter # add target letter to start sample = [target_letter] + sample # to-do shuffle the target letter samples.append(sample) times.append([timing[i] for i in range(len(timing) - 1)] + [timing[-1]] * stim_length) colors.append([color[i] for i in range(len(color) - 1)] + [color[-1]] * stim_length) schedule_seq = (samples, times, colors) return schedule_seq def get_task_info(experiment_length, task_color): """Get Task Info. Generates fixed RSVPKeyboard task text and color information for display. Args: experiment_length(int): Number of sequences for the experiment task_color(str): Task information display color Return get_task_info((tuple): task_text: array of task text to display task_color: array of colors for the task text ) """ # Do list comprehensions to get the arrays for the task we need. 
task_text = ['%s/%s' % (stim + 1, experiment_length) for stim in range(experiment_length)] task_color = [[str(task_color)] for stim in range(experiment_length)] return (task_text, task_color) def rsvp_copy_phrase_seq_generator(alp, target_letter, timing=[0.5, 1, 0.2], color=['green', 'white', 'white'], stim_length=10): """Generate copy phrase RSVPKeyboard sequences. Args: alp(list[str]): alphabet (can be arbitrary) target_letter([str]): letter to be copied timing(list[float]): Task specific timing for generator color(list[str]): Task specific color for generator First element is the target, second element is the fixation Observe that [-1] element represents the trial information stim_length(int): number of trials in a sequence Return: schedule_seq(tuple( samples[list[list[str]]]: list of sequences timing(list[list[float]]): list of timings color(list(list[str])): list of colors)): scheduled sequences """ len_alp = len(alp) # initialize our arrays samples, times, colors = [], [], [] rand_smp = np.random.randint(0, len_alp, stim_length) sample = ['+'] sample += [alp[i] for i in rand_smp] # if the target isn't in the array, replace it with some random index that # is not fixation if target_letter not in sample: random_index = np.random.randint(0, stim_length - 1) sample[random_index + 1] = target_letter # to-do shuffle the target letter samples.append(sample) times.append([timing[i] for i in range(len(timing) - 1)] + [timing[-1]] * stim_length) colors.append([color[i] for i in range(len(color) - 1)] + [color[-1]] * stim_length) schedule_seq = (samples, times, colors) return schedule_seq def generate_icon_match_images(experiment_length, image_path, number_of_sequences, timing, is_word): """Generate Image Icon Matches. Generates an array of images to use for the icon matching task. 
    Args:
        experiment_length(int): Number of images per sequence
        image_path(str): Path to image files
        number_of_sequences(int): Number of sequences to generate
        timing(list): List of timings; [parameters['time_target'],
            parameters['time_cross'], parameters['time_flash']]
        is_word(bool): Whether or not this is an icon to word matching task
    Return generate_icon_match_images(arrays of tuples of paths to images to
        display, and timings)
    """
    # Get all png images in image path
    image_array = [
        img for img in glob.glob(image_path + "*.png")
        if not img.endswith("PLUS.png")
    ]

    # NOTE(review): "- 1" means the last available image is treated as
    # unusable capacity — confirm whether this off-by-one is intentional.
    if experiment_length > len(image_array) - 1:
        raise Exception(
            'Number of images to be displayed on screen is longer than number of images available')

    # Generate indexes of target images
    target_image_numbers = np.random.randint(
        0, len(image_array), number_of_sequences)

    # Array of images to return
    return_array = []

    # Array of timings to return
    return_timing = []
    for specific_time in range(len(timing) - 1):
        return_timing.append(timing[specific_time])
    for item_without_timing in range(len(return_timing), experiment_length):
        return_timing.append(timing[-1])

    for sequence in range(number_of_sequences):
        return_array.append([])

        # Generate random permutation of image indexes
        random_number_array = np.random.permutation(len(image_array))
        if is_word:
            # Add name of target image to array
            image_path = path.basename(
                image_array[target_image_numbers[sequence]])
            return_array[sequence].append(image_path.replace('.png', ''))
        else:
            # Add target image to image array
            return_array[sequence].append(
                image_array[target_image_numbers[sequence]])

        # Add PLUS.png to image array
        return_array[sequence].append(
            'bcipy/static/images/bci_main_images/PLUS.png')
        # Add
def _rdop2drm(self, name):
    """
    Read Nastran output2 DRM SORT1 data block (table).

    Parameters
    ----------
    name : string
        Name of data block.

    Returns
    -------
    drm : ndarray
        The drm matrix.
    elem_info : ndarray
        2-column matrix of [id, element_type].

    Notes
    -----
    Reads OEF1 and OES1 type data blocks. This routine is beta --
    check output carefully.
    """
    # Expect IDENT/DATA record pairs. They repeat for each element
    # type for each mode.
    # This routine assumes all values are written, even the zeros.

    def _getdrm(pos, e, s, eids, etypes, ibytes):
        # Allocate the output matrix once the size of one full column
        # of data is known:
        #   pos - s  = bytes consumed by the mode-1 pass (one column)
        #   e - s    = total bytes in the data block
        # so ncols = total / bytes-per-column.
        bytes_per_col = pos - s
        nrows = len(eids)
        ncols = (e - s) // bytes_per_col
        # Integer dtype matching the file's word size; reinterpreted
        # as float via the .dtype assignment at the end of _rdop2drm.
        dtype = np.int32 if ibytes == 4 else np.int64
        drm = np.empty((nrows, ncols), dtype, order="F")
        elem_info = np.column_stack((eids, etypes))
        # Raw ids carry a device code in the last decimal digit;
        # dividing by 10 strips it (standard Nastran id*10+device
        # encoding -- see OFP documentation).
        elem_info[:, 0] //= 10
        return drm, elem_info

    s = self._fileh.tell()
    _, e = self._get_block_bytes(name, s)

    # read first IDENT above loop & check ACODE/TCODE:
    ident = self.rdop2record()
    n_ident = len(ident)
    achk = self._check_code(ident[0], [4], [[2]], "ACODE")
    tchk = self._check_code(ident[1], [1, 7], [[1], [0, 2]], "TCODE")
    if not (achk and tchk):
        raise ValueError("invalid ACODE and/or TCODE value")

    eids = []        # element ids gathered during the mode-1 pass
    etypes = []      # element type per gathered row
    column = []      # first column of the DRM (mode-1 data values)
    drm = None       # allocated lazily on the first mode-2 record
    n_data = []      # DATA record sizes per element type (from mode 1)
    r = 0            # current row offset into drm
    j = 0            # index into n_data for the current element type
    while ident is not None:
        elemtype = ident[2]
        mode = ident[4]
        nwords = ident[9]  # number of words/entry
        if mode == 1:
            # First mode: record sizes are unknown up front, so read
            # free-form, remember the size, and accumulate ids/types
            # and the first column of values.
            # DATA record:
            data = self.rdop2record().reshape(-1, nwords).T
            n_data.append(data.size)
            pos = self._fileh.tell()
            # Repeat each element id (nwords - 1) times -- one per
            # data word per entry -- via zero-broadcast:
            z = np.zeros((nwords - 1, 1), data.dtype)
            eids.extend((data[0] + z).T.ravel())
            etypes.extend([elemtype] * data.shape[1] * (nwords - 1))
            column.extend(data[1:].T.ravel())
        else:
            # Subsequent modes: sizes are known from the mode-1 pass.
            # DATA record:
            data = self.rdop2record(N=n_data[j])
            data = data.reshape(-1, nwords).T
            if drm is None:
                # First mode-2 record: now the full size is known, so
                # allocate and back-fill column 0 with the mode-1 data.
                drm, elem_info = _getdrm(pos, e, s, eids, etypes, self._ibytes)
                drm[:, mode - 2] = column
            n = (nwords - 1) * data.shape[1]
            drm[r : r + n, mode - 1] = data[1:].T.ravel()
            j += 1
            r += n
            if r == drm.shape[0]:
                # Finished all element types for this mode; wrap
                # around for the next mode's records.
                j = 0
                r = 0
        # IDENT record:
        ident = self.rdop2record(N=n_ident)
    if drm is None:
        # Single-mode file: no mode-2 record ever triggered the
        # allocation, so do it here from the mode-1 data.
        drm, elem_info = _getdrm(pos, e, s, eids, etypes, self._ibytes)
        drm[:, mode - 1] = column
    # Reinterpret the raw integer words as floats in place (view
    # change, not a conversion).
    drm.dtype = np.float32 if self._ibytes == 4 else np.float64
    return drm, elem_info
code-block:: none OUTPUT2 XYCDBDRS//0/OP2UNIT $ OUTPUT2 TOUGV1,TOUGS1,TOUGD1//0/OP2UNIT $ OUTPUT2 TOQGS1,TOQGD1,TOEFS1,TOEFD1//0/OP2UNIT $ OUTPUT2 TOESS1,TOESD1//0/OP2UNIT $ """ self._fileh.seek(self._postheaderpos) drmkeys = {} while 1: name, trailer, rectype = self.rdop2nt() if name is None: break if rectype > 0: if verbose: print(f"Skipping matrix {name}...") self.skipop2matrix(trailer) elif len(name) > 2 and name.find("TO") == 0: if verbose: print(f"Reading {name}...") # skip record 1 self.rdop2record() # record 2 contains directory # - starting at 10: type, id, number, row, 0 info = self.rdop2record()[10:] drmkeys[name.lower()] = (info.reshape(-1, 5).T)[:-1] self.rdop2eot() elif len(name) > 4 and name[:4] == "XYCD": if verbose: print(f"Reading {name}...") # record 1 contains order of request info drmkeys["dr"] = self.rdop2record() # record 2 contains sorted list drmkeys["drs"] = self.rdop2record().reshape(-1, 6).T self.rdop2eot() else: if verbose: print(f"Skipping table {name}...") self.skipop2table() return drmkeys def rdn2cop2(self): """ Read Nastran output2 file written by DMAP NAS2CAM; usually called by :func:`rdnas2cam`. Returns ------- dictionary 'selist' : array 2-columns matrix: [ seid, dnseid ] where, for each row, dnseid is the downstream superelement for seid. (dnseid = 0 if seid = 0). 'uset' : dictionary Indexed by the SE number. Each member is a pandas DataFrame described below. 'cstm' : dictionary Indexed by the SE number. Each member is a 14-column matrix containing the coordinate system transformation matrix for each coordinate system. See description below. 'cstm2' : dictionary Indexed by the SE number. Each member is another dictionary indexed by the coordinate system id number. This has the same information as 'cstm', but in a different format. See description below. 'maps' : dictionary Indexed by the SE number. Each member is a mapping table for mapping the A-set order from upstream to downstream; see below. 
'dnids' : dictionary Indexed by the SE number. Each member is a vector of ids of the A-set ids of grids and spoints for SE in the downstream superelement. (Does not have each DOF, just ids.) When using the CSUPER entry, these will be the ids on that entry. When using SECONCT entry, the ids are internally generated and will be a subset of the 'upids' entry for the downstream SE. 'upids' : dictionary Indexed by the SE number. Each member is a vector of ids of the A-set grids and spoints for upstream se's that connect to SE. (Does not have each DOF, just ids.) This will only be present for SECONCT type SEs. These ids are internally generated and will contain all the values in the 'dnids' of each upstream SE. This allows, for example, the routine :func:`pyyeti.nastran.n2p.upasetpv` to work. Has 0's for downstream ids (in P-set) that are not part of upstream SEs. Notes ----- The module `nastran` has many routines that use the data created by this routine. *'uset' description* Each `uset` variable is a 4-column pandas DataFrame with ID and DOF forming the row MultiIndex. The column index is: ``['nasset', 'x', 'y', 'z']``. The order of the degrees of freedom is in Nastran internal sort. GRIDs have 6 rows each and SPOINTs have 1 row. Here is an example `uset` variable with some notes for the ['x', 'y', 'z'] columns (described in more detail below):: nasset x y z id dof 1 1 2097154 1.0 2.0 3.0 # grid location in basic 2 2097154 0.0 1.0 0.0 # coord system info 3 2097154 0.0 0.0 0.0 # coord system origin 4 2097154 1.0 0.0 0.0 # | transform to basic 5 2097154 0.0 1.0 0.0 # | for coord system 6 2097154 0.0 0.0 1.0 # | 2 0 4194304 0.0 0.0 0.0 # spoint That example was formed by using :func:`pyyeti.nastran.n2p.make_uset`:: from pyyeti.nastran import n2p uset = n2p.make_uset(dof=[[1, 123456], [2, 0]], nasset=[n2p.mkusetmask('b'), n2p.mkusetmask('q')], xyz=[[1, 2, 3], [0, 0, 0]]) The "nasset" column specifies Nastran set membership. It is a 32-bit integer for each DOF. 
The integer has bits set to specify which Nastran set that particular DOF belongs to. For example, if the integer has the 1 bit set, the DOF is in the m-set. See the source code in :func:`pyyeti.nastran.n2p.mkusetmask` for all the bit positions. Note that you rarely (if ever) need to call :func:`pyyeti.nastran.n2p.mkusetmask` directly since the function :func:`pyyeti.nastran.n2p.mksetpv` does this behind the scenes to make partition vectors. For grids, the ['x', 'y', 'z'] part of the DataFrame is a 6 row by 3 column matrix:: Coord_Info = [[x y z] # location of node in basic [id type 0] # coord. id and type [xo yo zo] # origin of coord. system [ T ]] # 3x3 transformation to basic # for coordinate system For spoints, the ['x', 'y', 'z'] part of the DataFrame is a 1 row by 3 column matrix:: Coord_Info = [0.0, 0.0, 0.0] *'cstm' description* Each `cstm` contains all the coordinate system
value: value = '%s%s%s' % (perc, value, perc) conditions.append({'name': key[:-1], 'op': 'like', 'value': value}) else: if value != null: conditions.append({'name': key, 'op': eq, 'value': value}) else: conditions.append({'name': key, 'op': 'is null', 'value': ''}) elif key == 'conditions': conditions.extend(eval(value)) elif key == 'fields': # remove the last ',' if value.endswith(','): value = value[:-1] new_params[key] = value.split(',') else: if is_api_param_a_list(apiname, key): new_params[key] = value.split(',') else: new_params[key] = value elif gt in param: key, value = param.split(gt, 1) conditions.append({'name': key, 'op': gt, 'value': value}) elif lt in param: key, value = param.split(lt, 1) conditions.append({'name': key, 'op': lt, 'value': value}) new_params['conditions'] = conditions return new_params def create_msg(apiname, params): creator = self.msg_creator.get(apiname) if creator: return creator(apiname, params) if apiname.startswith('APIQuery') and apiname not in NOT_QUERY_MYSQL_APIS: params = generate_query_params(apiname, params) msg = eval('inventory.%s()' % apiname) for key in params.keys(): value = params[key] setattr(msg, key, value) return msg def set_session_to_api(msg): session = inventory.Session() session.uuid = self.session_uuid msg.session = session def clear_session(): self.session_uuid = None self.account_name = None self.user_name = None open(SESSION_FILE, 'w+').close() if args[0].startswith('#'): return (apiname, all_params) = build_params() if apiname in self.cli_cmd: # self.write_more(apiname, None) self.cli_cmd_func[apiname](all_params) return if not check_session(apiname): raise CliError("No session uuid defined") msg = create_msg(apiname, all_params) set_session_to_api(msg) try: if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.CREATE_ACCOUNT_NAME, self.CREATE_USER_NAME]: if not msg.password: raise CliError('"password" must be specified') msg.password = <PASSWORD>(msg.password).<PASSWORD>() if apiname in 
[self.USER_RESET_PASSWORD_NAME, self.ACCOUNT_RESET_PASSWORD_NAME]: if msg.password: msg.password = <PASSWORD>(msg.password).<PASSWORD>() if apiname == self.LOGOUT_MESSAGE_NAME: if not msg.sessionUuid: setattr(msg, 'sessionUuid', self.session_uuid) start_time = time.time() (name, event) = self.api.async_call_wait_for_complete(msg, fail_soon=True) end_time = time.time() if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.LOGIN_BY_LDAP_MESSAGE_NAME]: self.session_uuid = event.inventory.uuid self.account_name = None self.user_name = None session_file_writer = open(SESSION_FILE, 'w') session_file_writer.write(self.session_uuid) account_name_field = 'accountName' user_name_field = 'userName' if apiname == self.LOGIN_BY_LDAP_MESSAGE_NAME: self.account_name = event.accountInventory.name session_file_writer.write("\n" + self.account_name) elif apiname == self.LOGIN_MESSAGE_NAME: self.account_name = all_params[account_name_field] session_file_writer.write("\n" + self.account_name) elif apiname == self.LOGIN_BY_USER_NAME: self.account_name = all_params[account_name_field] self.user_name = all_params[user_name_field] session_file_writer.write("\n" + self.account_name) session_file_writer.write("\n" + self.user_name) if apiname == self.LOGOUT_MESSAGE_NAME: clear_session() result = jsonobject.dumps(event, True) result = str(result) result2 = [] for line in result.split('\n'): result2.append(str(line).decode('unicode_escape').replace('\n', '\\n')) result = '\n'.join(result2) print '%s\n' % result # print 'Time costing: %fs' % (end_time - start_time) self.write_more(args, result) except urllib3.exceptions.MaxRetryError as url_err: self.print_error('Is %s reachable? Please make sure the management node is running.' 
% self.api.api_url) self.print_error(str(url_err)) raise ("Server: %s is not reachable" % self.hostname) except Exception as e: self.write_more(args, str(e), False) if 'Session expired' in str(e): clear_session() raise e def main(self, cmd=None): if not cmd: self.usage() exit_code = 0 import atexit if not os.path.exists(os.path.dirname(CLI_HISTORY)): os.system('mkdir -p %s' % os.path.dirname(CLI_HISTORY)) atexit.register(clean_password_in_cli_history) atexit.register(readline.write_history_file, CLI_HISTORY) while True: try: if cmd: self.do_command(cmd) else: line = raw_input(self.get_prompt_with_account_info()) if line: pairs = shlex.split(line) self.do_command(pairs) except CliError as cli_err: self.print_error(str(cli_err)) exit_code = 1 except EOFError: print '' sys.exit(1) except KeyboardInterrupt: print '' except Exception as e: exit_code = 3 self.print_error(str(e)) if cmd: sys.exit(exit_code) def build_api_parameters(self): def rule_out_unneeded_params(keys): excludes = ['session'] for k in excludes: if k in keys: keys.remove(k) return keys for apiname in inventory.api_names: obj = eval("inventory.%s()" % apiname) params = [] params.extend(obj.__dict__.keys()) self.api_class_params[apiname] = rule_out_unneeded_params(params) def _parse_api_name(self, api_names): """ Remove API pattern 'API' and appendix 'MSG' """ short_api_name = [] for api in api_names: if api.endswith('Msg'): short_api_name.append(api[3:-3]) short_api_name.sort() return short_api_name def get_prompt_with_account_info(self): prompt_with_account_info = '' if self.account_name: prompt_with_account_info = self.account_name if self.user_name: prompt_with_account_info = prompt_with_account_info + '/' + self.user_name else: prompt_with_account_info = '-' prompt_with_account_info = prompt_with_account_info + ' ' + prompt return prompt_with_account_info def completer_print(self, substitution, matches, longest_match_length): def print_match(columes, new_matches, max_match_length): cur_col = 1 for 
match in new_matches: if cur_col == columes: end_sign = '\n' cur_col = 1 else: end_sign = ' ' * (max_match_length - len(match)) cur_col += 1 try: index = match.lower().index(self.curr_pattern.lower()) except Exception as e: print "can't find pattern: %s in match: %s" % (self.curr_pattern, match) print e raise e cprint(match[0:index], end='') cprint(match[index:(len(self.curr_pattern) + index)], attrs=['bold', 'reverse'], end='') cprint(match[(len(self.curr_pattern) + index):], end=end_sign) def print_bold(): max_match_length = 0 matches_dot = [] matches_eq_cond = [] matches_eq_param = [] matches_ot = [] currtext = readline.get_line_buffer() apiname = currtext.split()[0] if apiname.startswith('Query') and not apiname in NOT_QUERY_MYSQL_APIS: query_cmd = True else: query_cmd = False for match in matches: if len(match) > max_match_length: max_match_length = len(match) if match.endswith('.'): matches_dot.append(match) elif match.endswith('='): for key in query_param_keys: if query_cmd and match.startswith(key): matches_eq_param.append(match) break else: matches_eq_cond.append(match) else: matches_ot.append(match) max_match_length += 2 try: term_width = int(os.popen('stty size', 'r').read().split()[1]) except: term_width = 80 columes = term_width / max_match_length if columes == 0: columes = 1 if matches_dot: if query_cmd: cprint('[Query Conditions:]', attrs=['bold'], end='\n') print_match(columes, matches_dot, max_match_length) print '\n' if matches_eq_cond: # cprint('[Primitive Query Conditions:]', attrs=['bold'], end='\n') print_match(columes, matches_eq_cond, max_match_length) print '\n' if matches_eq_param: if query_cmd: cprint('[Parameters:]', attrs=['bold'], end='\n') print_match(columes, matches_eq_param, max_match_length) print '\n' if matches_ot: print_match(columes, matches_ot, max_match_length) print '\n' print '' print_bold() print '' cprint('%s%s' % (self.get_prompt_with_account_info(), readline.get_line_buffer()), end='') # readline.redisplay() def 
write_more(self, cmd, result, success=True): if self.hd.get(self.start_key): start_value = int(self.hd.get(self.start_key)) else: start_value = 0 if self.hd.get(self.last_key): last_value = int(self.hd.get(self.last_key)) else: last_value = 0 if last_value <= start_value: if start_value < CLI_MAX_RESULT_HISTORY: start_value += 1 else: start_value = 1 last_value = 2 else: if last_value < CLI_MAX_RESULT_HISTORY: start_value += 1 last_value += 1 else: start_value += 1 last_value = 1 self.hd.set(self.start_key, start_value) self.hd.set(self.last_key, last_value) # filedb might leave more than 1 same key item. while self.hd.get(str(start_value)): self.hd.rem(str(start_value)) result_file = '%s%d' % (CLI_RESULT_FILE, start_value) open(result_file, 'w').write(result) if not self.no_secure and 'password=' in ' '.join(cmd): cmds2 = [] for cmd2 in cmd: if not 'password=' in cmd2: cmds2.append(cmd2) else: cmds2.append(cmd2.split('=')[0] + '=' + '******') cmd = ' '.join(cmds2) self.hd.set(str(start_value), [cmd, success]) def read_more(self, num=None, need_print=True, full_info=True): """ need_print will indicate whether print the command result to screen. full_info will indicate whether return command and params information when return command results. """ start_value = self.hd.get(self.start_key) last_value = self.hd.get(self.last_key) more_usage_list = [text_doc.bold('Usage:'), text_doc.bold('\t%smore NUM\t #show the No. NUM Command result' % prompt), text_doc.bold( '\t%smore\t\t #show all available NUM and Command.' ' The failure command will be marked with "!" before it.' % prompt)] more_usage = '\n'.join(more_usage_list) if not start_value: print 'No command history to display.' 
return if num: if num.isdigit(): if int(num) > CLI_MAX_CMD_HISTORY: print 'Not find result for number: %s' % num print 'Max number is: %s ' % str(CLI_MAX_RESULT_HISTORY) cprint(more_usage, attrs=['bold'], end='\n') return key = start_value - int(num) + 1 if key <= 0: key += CLI_MAX_RESULT_HISTORY # print key result_list = self.hd.get(str(key)) result_file = '%s%d' % (CLI_RESULT_FILE, key) result = open(result_file, 'r').read() if result_list: output = 'Command: \n\t%s\nResult:\n%s' % \ (result_list[0], result) if need_print: pydoc.pager(output) if full_info: return [result_list[0], output] else: return [result_list[0], result] else: more_list = [] explamation = text_doc.bold('!') if start_value < last_value: for i in range(CLI_MAX_RESULT_HISTORY): if start_value - i > 0: key = start_value - i else: key = start_value - i + CLI_MAX_RESULT_HISTORY cmd_result = self.hd.get(str(key)) cmd_result_list = str(cmd_result[0]).split() cmd = text_doc.bold(cmd_result_list[0]) if len(cmd_result_list) > 1: cmd = cmd + ' ' + ' '.join(cmd_result_list[1:]) if len(cmd_result) <= 2 or cmd_result[2]: more_list.append('[%s]\t %s' % (str(i + 1), cmd)) else: more_list.append('[%s] %s\t %s' % (str(i + 1), explamation, cmd)) else: for i in range(start_value): cmd_result = self.hd.get(str(start_value - i)) cmd_result_list = str(cmd_result[0]).split() cmd = text_doc.bold(cmd_result_list[0]) if len(cmd_result_list) > 1: cmd = cmd + ' ' + ' '.join(cmd_result_list[1:]) if len(cmd_result) <= 2 or cmd_result[2]: more_list.append('[%s]\t %s' % (str(i + 1), cmd)) else: more_list.append('[%s] %s\t %s' % (str(i + 1), explamation, cmd)) more_result = '\n'.join(more_list) header = text_doc.bold('[NUM]\tCOMMAND') more_result = '%s\n%s\n%s' % (header, '-' * 48, more_result) more_result = '%s\n%s' % (more_result, more_usage) pydoc.pager(more_result) return print 'Not find result for number: %s' % num cprint(more_usage, attrs=['bold'], end='\n') def save_json_to_file(self, all_params): def 
write_to_file(output, file_name, num): file_name = os.path.abspath(file_name) open(file_name, 'w').write(output) print "Saved command: %s result to file: %s" % (str(num), file_name) if not all_params: self.show_help() return nums = all_params[0].split(',') if len(all_params) > 1: file_folder = all_params[1] if len(nums) > 1 and not os.path.isdir(file_folder): print "%s must be a folder, to save more than 1 command" % file_folder return else: file_folder = None if len(all_params)
import os from collections import defaultdict from io import IOBase from itertools import groupby from typing import Dict, List, Tuple, Union, Optional, AnyStr from uuid import uuid4 from zipfile import ZipFile import msgpack import networkx as nx from typing.io import IO from charge.babel import convert_from, IOType from charge.charge_types import Atom from charge.molecule import atoms_neighborhoods_charges from charge.multiprocessor import MultiProcessor from charge.nauty import Nauty from charge.settings import REPO_LOCATION ChargeSet = Dict[int, Dict[str, List[float]]] """A collection of possible charges, indexed by shell size and \ neighborhood canonical key. """ TraceableChargeSet = Dict[int, Dict[str, List[Tuple[float, int, Atom]]]] """A collection of possible charges with the molid of the molecule \ they came from and the core atom of the neighborhood, \ indexed by shell size and neighborhood canonical key. """ EitherChargeSet = Union[ChargeSet, TraceableChargeSet] FileOrFileLike = Union[str, os.PathLike, IOBase, IO[AnyStr]] class Repository: """A collection of atom charges by neighborhood. Args: min_shell: Minimum shell size in this repository. max_shell: Maximum shell size in this repository. nauty: Nauty instance. versioning: If True, assigns a unique int to the lists of charges on each change. Attributes: charges_iacm: A dictionary, keyed by shell size, of \ dictionaries, keyed by neighborhood hash, of lists of \ charges (floats) for the atom at the center of the \ neighborhood. Atoms use IACM types. Optionally, may \ contain tuples of (charge, molid, atom) if the \ repository is traceable. charges_elem: A dictionary, keyed by shell size, of \ dictionaries, keyed by neighborhood hash, of lists of \ charges (floats) for the atom at the center of the \ neighborhood. Atoms use plain elements. Optionally, may \ contain tuples of (charge, molid, atom) if the \ repository is traceable. iso_iacm: A dictionary mapping molids to lists of isomorphic \ molids. 
Atoms use IACM types. iso_elem: A dictionary mapping molids to lists of isomorphic \ molids. Atoms use plain elements. """ def __init__(self, min_shell: int=1, max_shell: int=7, nauty: Optional[Nauty]=None, traceable: Optional[bool] = False, versioning: Optional[bool]=False) -> None: self.__nauty = nauty if nauty else Nauty() self.__min_shell = max(min_shell, 0) self.__max_shell = max_shell self.__versioning = versioning self.__traceable = traceable if not versioning: self.charges_iacm = defaultdict(lambda: defaultdict(list)) # type: EitherChargeSet self.charges_elem = defaultdict(lambda: defaultdict(list)) # type: EitherChargeSet else: self.charges_iacm = defaultdict(lambda: defaultdict(_VersioningList)) # type: EitherChargeSet self.charges_elem = defaultdict(lambda: defaultdict(_VersioningList)) # type: EitherChargeSet if traceable: self.iso_iacm = defaultdict(list) self.iso_elem = defaultdict(list) @staticmethod def create_from( data_location: str, data_type: IOType=IOType.LGF, min_shell: int=1, max_shell: int=7, traceable: Optional[bool]=False, versioning: Optional[bool]=False, nauty: Optional[Nauty]=None ) -> 'Repository': """Creates a new Repository from a directory of files. Args: data_location: Path to the data directory. data_type: Type of the files to read. min_shell: Minimum shell size to compute. max_shell: Maximum shell size to compute. nauty: Nauty instance. versioning: If True, assigns a unique int to the lists of charges on each change. Returns: A new Repository with data read and processed. """ repo = Repository(min_shell, max_shell, nauty, traceable, versioning) if traceable: repo.charges_iacm, repo.charges_elem, repo.iso_iacm, repo.iso_elem =\ repo.__read_data(data_location, data_type, with_iso=True) else: repo.charges_iacm, repo.charges_elem = repo.__read_data(data_location, data_type) return repo def add_from( self, data_location: str, data_type: IOType = IOType.LGF ) -> None: """Adds a directory of files to the Repository. 
Args: data_location: Path to the data directory. data_type: Type of the files to read. Raises: ValueError: If the Repository is traceable. """ if self.__traceable: raise ValueError('It is not possible to add data to a traceable repository.') charges_iacm, charges_elem = self.__read_data(data_location, data_type) for shell_size, chdct in charges_iacm.items(): for key, charges in chdct.items(): self.charges_iacm[shell_size][key].extend(charges) self.charges_iacm[shell_size][key].sort() for shell_size, chdct in charges_elem.items(): for key, charges in chdct.items(): self.charges_elem[shell_size][key].extend(charges) self.charges_elem[shell_size][key].sort() def remove_from( self, data_location: str, data_type: IOType = IOType.LGF ) -> None: """Removes a directory of files from the Repository. Args: data_location: Path to the data directory. data_type: Type of the files to read. Raises: ValueError: If the Repository is traceable. """ if self.__traceable: raise ValueError('It is not possible to remove data from a traceable repository.') charges_iacm, charges_elem = self.__read_data(data_location, data_type) for shell_size, chdct in charges_iacm.items(): for key, charges in chdct.items(): for charge in charges: self.charges_iacm[shell_size][key].remove(charge) if len(self.charges_iacm[shell_size][key]) == 0: del self.charges_iacm[shell_size][key] if len(self.charges_iacm[shell_size]) == 0: del self.charges_iacm[shell_size] for shell_size, chdct in charges_elem.items(): for key, charges in chdct.items(): for charge in charges: self.charges_elem[shell_size][key].remove(charge) if len(self.charges_elem[shell_size][key]) == 0: del self.charges_elem[shell_size][key] if len(self.charges_elem[shell_size]) == 0: del self.charges_elem[shell_size] def __read_data( self, data_location: str, data_type: IOType = IOType.LGF, with_iso: bool = False, ) -> Union[Tuple[Dict[int, Dict[str, List[float]]], Dict[int, Dict[str, List[float]]]], Tuple[Dict[int, Dict[str, List[float]]], 
@staticmethod
def read(
        location: Optional[FileOrFileLike] = REPO_LOCATION,
        versioning: Optional[bool] = False,
        nauty: Optional[Nauty]=None
        ) -> 'Repository':
    """Create a Repository by loading from a zip file.

    The zip file must have been created by a call to write().

    Args:
        location: Path to the zip file (or file like object) to \
                be read.
        nauty: Nauty instance.
        versioning: If True, assigns a unique int to the lists of \
                charges on each change.

    Raises:
        ValueError, UnpackValueError, BadZipFile, RuntimeError: \
                If the zip file is corrupted.

    Returns:
        A new Repository.
    """
    repo = Repository(nauty=nauty, versioning=versioning)
    with ZipFile(location, mode='r') as zf:
        names = zf.namelist()
        if 'meta' not in names or 'charges_iacm' not in names or 'charges_elem' not in names:
            raise ValueError('Zip file is missing "meta", "charges_iacm" or "charges_elem" entries.')
        repo.__min_shell, repo.__max_shell, repo.__traceable = msgpack.unpackb(
                zf.read('meta'), raw=False)
        repo.charges_iacm = msgpack.unpackb(
                zf.read('charges_iacm'), raw=False)
        repo.charges_elem = msgpack.unpackb(
                zf.read('charges_elem'), raw=False)
        if repo.__traceable:
            # BUG FIX: this check used 'and', so a file missing only
            # one of the two entries skipped the clear error here and
            # crashed below in zf.read() with a KeyError instead. The
            # error message already said "or"; now the logic does too.
            if 'iso_iacm' not in names or 'iso_elem' not in names:
                raise ValueError('Zip file is missing "iso_iacm" or "iso_elem" entries.')
            repo.iso_iacm = msgpack.unpackb(
                    zf.read('iso_iacm'), raw=False)
            repo.iso_elem = msgpack.unpackb(
                    zf.read('iso_elem'), raw=False)

    if versioning:
        # msgpack restores plain lists; wrap them so future mutations
        # are version-tracked.
        for _, chdct in repo.charges_iacm.items():
            for key, charges in chdct.items():
                chdct[key] = _VersioningList(charges)
        for _, chdct in repo.charges_elem.items():
            for key, charges in chdct.items():
                chdct[key] = _VersioningList(charges)

    return repo
""" with ZipFile(out, mode='w') as zf: zf.writestr('meta', msgpack.packb( (self.__min_shell, self.__max_shell, self.__traceable))) zf.writestr('charges_iacm', msgpack.packb(self.charges_iacm)) zf.writestr('charges_elem', msgpack.packb(self.charges_elem)) if self.__traceable: zf.writestr('iso_iacm', msgpack.packb(self.iso_iacm)) zf.writestr('iso_elem', msgpack.packb(self.iso_elem)) def __read_graphs( self, molids: List[int], data_location: str, ext: str, data_type: IOType ) -> List[Tuple[int, nx.Graph]]: """Read graphs from a directory of input files.""" graphs = [] with MultiProcessor( _ReadWorker, (data_location, ext, data_type)) as mp: for molid, graph in mp.processed(molids, 'reading files'): graphs.append((molid, graph)) return graphs def __generate_charges( self, graphs: List[Tuple[int, nx.Graph]], color_key: str, traceable: bool=False, versioing: bool=False ) -> Dict[int, Dict[str, List[float]]]: """Generate charges for all shell sizes and neighborhoods.""" if not versioing: charges = defaultdict(lambda: defaultdict(list)) else: charges = defaultdict(lambda: defaultdict(_VersioningList)) if traceable: Worker = _TraceableChargeWorker else: Worker = _ChargeWorker for shell in range(self.__min_shell, self.__max_shell + 1): with MultiProcessor(Worker, (shell, color_key)) as mp: for c in mp.processed(graphs, 'shell %d' % shell): for key, values in c.items(): charges[shell][key] += values for shell in range(self.__min_shell, self.__max_shell + 1): for key, values in charges[shell].items(): charges[shell][key].sort() return charges def __make_isomorphics( self, molids: List[int], canons: Dict[int, str] ) -> Dict[int, List[int]]: """Find isomorphic molids and create map of them.""" isomorphics = defaultdict(list) molids_by_key = sorted(molids, key=lambda molid: canons[molid]) for _, group in groupby(molids_by_key, key=lambda molid: canons[molid]): isogroup = list(group) if len(isogroup) > 1: for molid in isogroup: isomorphics[molid] = isogroup return isomorphics def 
__make_canons( self, graphs: List[Tuple[int, nx.Graph]], color_key: str ) -> Dict[int, str]: """Canonicalize the given graphs using Nauty.""" canons = dict() with MultiProcessor(_CanonicalizationWorker, color_key) as mp: for molid, canon in mp.processed(graphs): canons[molid] = canon return canons class _ReadWorker: """Reads a graph from a file.""" def __init__(self, data_location: str, extension: str, data_type: IOType): self.__data_location = data_location self.__extension = extension self.__data_type = data_type def process(self, molid: int) -> Tuple[int, nx.Graph]: filename =
<reponame>dgarrett622/EXOSIMS # -*- coding: utf-8 -*- import time import numpy as np from scipy import interpolate import astropy.units as u import astropy.constants as const import os, inspect try: import cPickle as pickle except: import pickle import hashlib from EXOSIMS.Prototypes.Completeness import Completeness from EXOSIMS.util.eccanom import eccanom from EXOSIMS.util.deltaMag import deltaMag class BrownCompleteness(Completeness): """Completeness class template This class contains all variables and methods necessary to perform Completeness Module calculations in exoplanet mission simulation. Args: \*\*specs: user specified values Attributes: minComp (float): Minimum completeness level for detection Nplanets (integer): Number of planets for initial completeness Monte Carlo simulation classpath (string): Path on disk to Brown Completeness filename (string): Name of file where completeness interpolant is stored visits (ndarray): Number of observations corresponding to each star in the target list (initialized in gen_update) updates (nx5 ndarray): Completeness values of successive observations of each star in the target list (initialized in gen_update) """ def __init__(self, Nplanets=1e8, **specs): # bring in inherited Completeness prototype __init__ values Completeness.__init__(self, **specs) # Number of planets to sample self.Nplanets = int(Nplanets) # get path to completeness interpolant stored in a pickled .comp file self.classpath = os.path.split(inspect.getfile(self.__class__))[0] self.filename = specs['modules']['PlanetPopulation'] atts = ['arange','erange','prange','Rprange','Mprange','scaleOrbits','constrainOrbits'] extstr = '' for att in atts: extstr += '%s: ' % att + str(getattr(self.PlanetPopulation, att)) + ' ' ext = hashlib.md5(extstr).hexdigest() self.filename += ext def target_completeness(self, targlist): """Generates completeness values for target stars This method is called from TargetList __init__ method. 
Args: targlist (TargetList): TargetList class object Returns: comp0 (ndarray): 1D numpy array of completeness values for each target star """ # set up "ensemble visit photometric and obscurational completeness" # interpolant for initial completeness values # bins for interpolant bins = 1000 # xedges is array of separation values for interpolant xedges = np.linspace(0., self.PlanetPopulation.rrange[1].value, bins)*\ self.PlanetPopulation.arange.unit xedges = xedges.to('AU').value # yedges is array of delta magnitude values for interpolant ymin = np.round((-2.5*np.log10(self.PlanetPopulation.prange[1]*\ (self.PlanetPopulation.Rprange[1]/(self.PlanetPopulation.rrange[0]))\ .decompose().value**2))) ymax = np.round((-2.5*np.log10(self.PlanetPopulation.prange[0]*\ (self.PlanetPopulation.Rprange[0]/(self.PlanetPopulation.rrange[1]))\ .decompose().value**2*1e-11))) yedges = np.linspace(ymin, ymax, bins) # number of planets for each Monte Carlo simulation nplan = int(np.min([1e6,self.Nplanets])) # number of simulations to perform (must be integer) steps = int(self.Nplanets/nplan) # path to 2D completeness pdf array for interpolation Cpath = os.path.join(self.classpath, self.filename+'.comp') Cpdf, xedges2, yedges2 = self.genC(Cpath, nplan, xedges, yedges, steps) EVPOCpdf = interpolate.RectBivariateSpline(xedges, yedges, Cpdf.T) EVPOC = np.vectorize(EVPOCpdf.integral) # calculate separations based on IWA smin = np.tan(targlist.OpticalSystem.IWA)*targlist.dist if np.isinf(targlist.OpticalSystem.OWA): smax = xedges[-1]*u.AU else: smax = np.tan(targlist.OpticalSystem.OWA)*targlist.dist # calculate dMags based on limiting dMag dMagmax = targlist.OpticalSystem.dMagLim #np.array([targlist.OpticalSystem.dMagLim]*targlist.nStars) dMagmin = ymin if self.PlanetPopulation.scaleOrbits: L = np.where(targlist.L>0, targlist.L, 1e-10) #take care of zero/negative values smin = smin/np.sqrt(L) smax = smax/np.sqrt(L) dMagmin -= 2.5*np.log10(L) dMagmax -= 2.5*np.log10(L) comp0 = 
EVPOC(smin.to('AU').value, smax.to('AU').value, dMagmin, dMagmax) return comp0 def gen_update(self, targlist): """Generates dynamic completeness values for multiple visits of each star in the target list Args: targlist (TargetList): TargetList module """ print 'Beginning completeness update calculations' self.visits = np.array([0]*targlist.nStars) self.updates = [] # number of planets to simulate nplan = int(2e4) # normalization time dt = 1e9*u.day # sample quantities which do not change in time a = self.PlanetPopulation.gen_sma(nplan) # AU e = self.PlanetPopulation.gen_eccen(nplan) I = self.PlanetPopulation.gen_I(nplan) # deg O = self.PlanetPopulation.gen_O(nplan) # deg w = self.PlanetPopulation.gen_w(nplan) # deg p = self.PlanetPopulation.gen_albedo(nplan) Rp = self.PlanetPopulation.gen_radius(nplan) # km Mp = self.PlanetPopulation.gen_mass(nplan) # kg rmax = a*(1.+e) rmin = a*(1.-e) # sample quantity which will be updated M = np.random.uniform(high=2.*np.pi,size=nplan) newM = np.zeros((nplan,)) # population values smin = (np.tan(targlist.OpticalSystem.IWA)*targlist.dist).to('AU') if np.isfinite(targlist.OpticalSystem.OWA): smax = (np.tan(targlist.OpticalSystem.OWA)*targlist.dist).to('AU') else: smax = np.array([np.max(self.PlanetPopulation.arange.to('AU').value)*\ (1.+np.max(self.PlanetPopulation.erange))]*targlist.nStars)*u.AU # fill dynamic completeness values for sInd in xrange(targlist.nStars): Mstar = targlist.MsTrue[sInd]*const.M_sun # remove rmax < smin and rmin > smax inside = np.where(rmax > smin[sInd])[0] outside = np.where(rmin < smax[sInd])[0] pInds = np.intersect1d(inside,outside) dynamic = [] # calculate for 5 successive observations for num in xrange(5): if not pInds.any(): dynamic.append(0.) 
break # find Eccentric anomaly if num == 0: E = eccanom(M[pInds],e[pInds]) newM[pInds] = M[pInds] else: E = eccanom(newM[pInds],e[pInds]) r = a[pInds]*(1.-e[pInds]*np.cos(E)) r1 = r*(np.cos(E) - e[pInds]) r1 = np.hstack((r1.reshape(len(r1),1), r1.reshape(len(r1),1), r1.reshape(len(r1),1))) r2 = (r*np.sin(E)*np.sqrt(1. - e[pInds]**2)) r2 = np.hstack((r2.reshape(len(r2),1), r2.reshape(len(r2),1), r2.reshape(len(r2),1))) a1 = np.cos(O[pInds])*np.cos(w[pInds]) - np.sin(O[pInds])*np.sin(w[pInds])*np.cos(I[pInds]) a2 = np.sin(O[pInds])*np.cos(w[pInds]) + np.cos(O[pInds])*np.sin(w[pInds])*np.cos(I[pInds]) a3 = np.sin(w[pInds])*np.sin(I[pInds]) A = np.hstack((a1.reshape(len(a1),1), a2.reshape(len(a2),1), a3.reshape(len(a3),1))) b1 = -np.cos(O[pInds])*np.sin(w[pInds]) - np.sin(O[pInds])*np.cos(w[pInds])*np.cos(I[pInds]) b2 = -np.sin(O[pInds])*np.sin(w[pInds]) + np.cos(O[pInds])*np.cos(w[pInds])*np.cos(I[pInds]) b3 = np.cos(w[pInds])*np.sin(I[pInds]) B = np.hstack((b1.reshape(len(b1),1), b2.reshape(len(b2),1), b3.reshape(len(b3),1))) # planet position, planet-star distance, apparent separation r = (A*r1 + B*r2)*u.AU # position vector d = np.sqrt(np.sum(r**2, axis=1)) # planet-star distance s = np.sqrt(np.sum(r[:,0:2]**2, axis=1)) # apparent separation beta = np.arccos(r[:,2]/d) # phase angle Phi = self.PlanetPhysicalModel.calc_Phi(beta) # phase function dMag = deltaMag(p[pInds],Rp[pInds],d,Phi) # difference in magnitude toremoves = np.where((s > smin[sInd]) & (s < smax[sInd]))[0] toremovedmag = np.where(dMag < targlist.OpticalSystem.dMagLim)[0] toremove = np.intersect1d(toremoves, toremovedmag) pInds = np.delete(pInds, toremove) if num == 0: dynamic.append(targlist.comp0[sInd]) else: dynamic.append(float(len(toremove))/nplan) # update M mu = const.G*(Mstar+Mp[pInds]) n = np.sqrt(mu/a[pInds]**3) newM[pInds] = (newM[pInds] + n*dt)/(2*np.pi) % 1 * 2.*np.pi self.updates.append(dynamic) if (sInd+1) % 50 == 0: print 'stars: %r / %r' % (sInd+1,targlist.nStars) self.updates = 
np.array(self.updates) print 'Completeness update calculations finished' def completeness_update(self, sInd, targlist, obsbegin, obsend, nexttime): """Updates completeness value for stars previously observed Args: sInd (integer): Index of star just observed targlist (TargetList): TargetList class module obsbegin (astropy Quantity): Time of observation begin in units of day obsend (astropy Quantity): Time of observation end in units of day nexttime (astropy Quantity): Time of next observational period in units of day Returns: comp0 (ndarray): Completeness values for each star in the target list """ self.visits[sInd] += 1 if self.visits[sInd] > len(self.updates[sInd])-1: targlist.comp0[sInd] = self.updates[sInd][-1] else: targlist.comp0[sInd] = self.updates[sInd][self.visits[sInd]] return targlist.comp0 def genC(self, Cpath, nplan, xedges, yedges, steps): """Gets completeness interpolant for initial completeness This function either loads a completeness .comp file based on specified Planet Population module or performs Monte Carlo simulations to get the 2D completeness values needed for interpolation. Args: Cpath (string): path to 2D completeness value array nplan (float): number of planets used in each simulation xedges (ndarray): 1D numpy ndarray of x edge of 2d histogram (separation) yedges (ndarray): 1D numpy ndarray of y edge of 2d histogram (dMag) steps (integer): number of simulations to perform Returns: H (ndarray): 2D numpy ndarray of completeness probability density values """ # if the 2D completeness pdf array exists as a .comp file load it if os.path.exists(Cpath): print 'Loading cached completeness file from "%s".' % Cpath H = pickle.load(open(Cpath, 'rb')) print 'Completeness loaded from cache.' #h, xedges, yedges = self.hist(nplan, xedges, yedges) else: # run Monte Carlo simulation and pickle the resulting array print 'Cached completeness file not found at "%s".' % Cpath print 'Beginning Monte Carlo completeness calculations.' 
t0, t1 = None, None # keep track of per-iteration time for i in xrange(steps): t0, t1 = t1, time.time() if t0 is None: delta_t_msg = '' # no message else: delta_t_msg = '[%.3f s/iteration]' % (t1 - t0) print 'Completeness iteration: %5d / %5d %s' % (i+1, steps, delta_t_msg) # get completeness histogram h, xedges, yedges = self.hist(nplan, xedges, yedges) if i == 0: H = h else: H += h H = H/(self.Nplanets*(xedges[1]-xedges[0])*(yedges[1]-yedges[0])) # store 2D completeness pdf array as .comp file pickle.dump(H, open(Cpath, 'wb')) print 'Monte Carlo completeness calculations finished' print '2D completeness array stored in %r' % Cpath return H, xedges, yedges def hist(self, nplan, xedges, yedges): """Returns completeness histogram for Monte Carlo simulation This function uses the inherited Planet Population module. Args: nplan (float): Number of planets used xedges (ndarray): 1D numpy ndarray of x edge of 2d histogram (separation) yedges (ndarray): 1D numpy ndarray of y edge of 2d histogram (dMag) Returns: h (ndarray): 2D numpy ndarray containing
fdopen.argtypes = [c_int, c_char_p] fdopen.restype = c_void_p fdopen.errcheck = self.errcheck with open(savFileName, mode) as f: self.fd = fdopen(f.fileno(), mode) if mode == "rb": spssOpen = self.spssio.spssOpenRead elif mode == "wb": spssOpen = self.spssio.spssOpenWrite elif mode == "cp": spssOpen = self.spssio.spssOpenWriteCopy elif mode == "ab": spssOpen = self.spssio.spssOpenAppend savFileName = self._encodeFileName(savFileName) refSavFileName = self._encodeFileName(refSavFileName) sav = c_char_p(savFileName) fh = c_int(self.fd) if mode == "cp": retcode = spssOpen(sav, c_char_p(refSavFileName), pointer(fh)) else: retcode = spssOpen(sav, pointer(fh)) if retcode > 0: msg = "Error opening file %r in mode %r" raise SPSSIOError(msg % (savFileName, mode), retcode) return fh.value def closeSavFile(self, fh, mode="rb"): """This function closes the sav file associated with <fh> that was open in mode <mode>.""" if mode == "rb": spssClose = self.spssio.spssCloseRead elif mode == "wb": spssClose = self.spssio.spssCloseWrite elif mode == "ab": spssClose = self.spssio.spssCloseAppend retcode = spssClose(c_int(fh)) if retcode > 0: raise SPSSIOError("Error closing file in mode %r" % mode, retcode) @property def releaseInfo(self): """This function reports release- and machine-specific information about the open file.""" relInfo = ["release number", "release subnumber", "fixpack number", "machine code", "floating-point representation code", "compression scheme code", "big/little-endian code", "character representation code"] relInfoArr = (c_int * len(relInfo))() retcode = self.spssio.spssGetReleaseInfo(c_int(self.fh), relInfoArr) if retcode > 0: raise SPSSIOError("Error getting ReleaseInfo", retcode) info = dict([(item, relInfoArr[i]) for i, item in enumerate(relInfo)]) return info @property def spssVersion(self): """Return the SPSS version that was used to create the opened file as a three-tuple indicating major, minor, and fixpack version as ints. 
NB: in the transition from SPSS to IBM, a new four-digit versioning nomenclature is used. This function returns the old three-digit nomenclature. Therefore, no patch version information is available.""" info = self.releaseInfo major = info["release number"] minor = info["release subnumber"] fixpack = info["fixpack number"] return major, minor, fixpack @property def fileCompression(self): """Get/Set the file compression. Returns/Takes a compression switch which may be any of the following: 'uncompressed', 'standard', or 'zlib'. Zlib comression requires SPSS v21 I/O files.""" compression = {0: "uncompressed", 1: "standard", 2: "zlib"} compSwitch = c_int() func = self.spssio.spssGetCompression retcode = func(c_int(self.fh), byref(compSwitch)) if retcode > 0: raise SPSSIOError("Error getting file compression", retcode) return compression.get(compSwitch.value) @fileCompression.setter def fileCompression(self, compSwitch): compression = {"uncompressed": 0, "standard": 1, "zlib": 2} compSwitch = compression.get(compSwitch) func = self.spssio.spssSetCompression retcode = func(c_int(self.fh), c_int(compSwitch)) invalidSwitch = retcodes.get(retcode) == 'SPSS_INVALID_COMPSW' if invalidSwitch and self.spssVersion[0] < 21: msg = "Writing zcompressed files requires >=v21 SPSS I/O libraries" raise ValueError(msg) elif retcode > 0: raise SPSSIOError("Error setting file compression", retcode) @property def systemString(self): """This function returns the name of the system under which the file was created aa a string.""" sysName = create_string_buffer(42) func = self.spssio.spssGetSystemString retcode = func(c_int(self.fh), byref(sysName)) if retcode > 0: raise SPSSIOError("Error getting SystemString", retcode) return sysName.value def getStruct(self, varTypes, varNames, mode="rb"): """This function returns a compiled struct object. The required struct format string for the conversion between C and Python is created on the basis of varType and byte order. 
--varTypes: SPSS data files have either 8-byte doubles/floats or n-byte chars[]/ strings, where n is always 8 bytes or a multiple thereof. --byte order: files are written in the byte order of the host system (mode="wb") and read/appended using the byte order information contained in the SPSS data file (mode is "ab" or "rb" or "cp")""" if mode in ("ab", "rb", "cp"): # derive endianness from file endianness = self.releaseInfo["big/little-endian code"] endianness = ">" if endianness > 0 else "<" elif mode == "wb": # derive endianness from host if sys.byteorder == "little": endianness = "<" elif sys.byteorder == "big": endianness = ">" else: endianness = "@" structFmt = [endianness] ceil = math.ceil for varName in varNames: varType = varTypes[varName] if varType == 0: structFmt.append("d") else: fmt = str(int(ceil(int(varType) / 8.0) * 8)) structFmt.append(fmt + "s") return struct.Struct("".join(structFmt)) def getCaseBuffer(self): """This function returns a buffer and a pointer to that buffer. A whole case will be read into this buffer.""" caseSize = c_long() retcode = self.spssio.spssGetCaseSize(c_int(self.fh), byref(caseSize)) caseBuffer = create_string_buffer(caseSize.value) if retcode > 0: raise SPSSIOError("Problem getting case buffer", retcode) return caseBuffer @property def sysmis(self): """This function returns the IBM SPSS Statistics system-missing value ($SYSMIS) for the host system (also called 'NA' in other systems).""" try: sysmis = -1 * sys.float_info[0] # Python 2.6 and higher. except AttributeError: self.spssio.spssSysmisVal.restype = c_float sysmis = self.spssio.spssSysmisVal() return sysmis @property def missingValuesLowHigh(self): """This function returns the 'lowest' and 'highest' values used for numeric missing value ranges on the host system. This can be used in a similar way as the LO and HI keywords in missing values specifications (cf. MISSING VALUES foo (LO THRU 0). 
It may be called at any time.""" lowest, highest = c_double(), c_double() func = self.spssio.spssLowHighVal retcode = func(byref(lowest), byref(highest)) return lowest.value, highest.value @property def ioLocale(self): """This function gets/sets the I/O Module's locale. This corresponds with the SPSS command SET LOCALE. The I/O Module's locale is separate from that of the client application. The <localeName> parameter and the return value are identical to those for the C run-time function setlocale. The exact locale name specification depends on the OS of the host sytem, but has the following form: <lang>_<territory>.<codeset>[@<modifiers>] The 'codeset' and 'modifier' components are optional and in Windows, aliases (e.g. 'english') may be used. When the I/O Module is first loaded, its locale is set to the system default. See also: --https://wiki.archlinux.org/index.php/Locale --http://msdn.microsoft.com/en-us/library/39cwe7zf(v=vs.80).aspx""" if hasattr(self, "setLocale"): return self.setLocale else: currLocale = ".".join(locale.getlocale()) print "NOTE. Locale not set; getting current locale: ", currLocale return currLocale @ioLocale.setter def ioLocale(self, localeName=""): if not localeName: localeName = ".".join(locale.getlocale()) func = self.spssio.spssSetLocale func.restype = c_char_p self.setLocale = func(c_int(locale.LC_ALL), c_char_p(localeName)) if self.setLocale is None: raise ValueError("Invalid ioLocale: %r" % localeName) return self.setLocale @property def fileCodePage(self): """This function provides the Windows code page number of the encoding applicable to a file.""" nCodePage = c_int() func = self.spssio.spssGetFileCodePage retcode = func(c_int(self.fh), byref(nCodePage)) return nCodePage.value def isCompatibleEncoding(self): """This function determines whether the file and interface encoding are compatible.""" try: # Windows, note typo 'Endoding'! 
func = self.spssio.spssIsCompatibleEndoding except AttributeError: func = self.spssio.spssIsCompatibleEncoding func.restype = c_bool isCompatible = c_int() retcode = func(c_int(self.fh), byref(isCompatible)) if retcode > 0: msg = "Error testing encoding compatibility: %r" raise SPSSIOError(msg % isCompatible.value, retcode) if not isCompatible.value and not self.ioUtf8: msg = ("NOTE. SPSS Statistics data file %r is written in a " + "character encoding (%s) incompatible with the current " + "ioLocale setting. It may not be readable. Consider " + "changing ioLocale or setting ioUtf8=True.") print msg % (self.savFileName, self.fileEncoding) return bool(isCompatible.value) @property def ioUtf8(self): """This function returns/sets the current interface encoding. ioUtf8 = False --> CODEPAGE mode, ioUtf8 = True --> UTF-8 mode, aka. Unicode mode This corresponds with the SPSS command SHOW UNICODE (getter) and SET UNICODE=ON/OFF (setter).""" if hasattr(self, "ioUtf8_"): return self.ioUtf8_ self.ioUtf8_ = self.spssio.spssGetInterfaceEncoding() return bool(self.ioUtf8_) @ioUtf8.setter def ioUtf8(self, ioUtf8): try: retcode = self.spssio.spssSetInterfaceEncoding(c_int(int(ioUtf8))) if retcode > 0 and not self.encoding_and_locale_set: # not self.encoding_and_locale_set --> nested context managers raise SPSSIOError("Error setting IO interface", retcode) except TypeError: msg = "Invalid interface encoding: %r (must be bool)" raise SPSSIOError(msg % ioUtf8) @property def fileEncoding(self): """This function obtains the encoding applicable to a file. The encoding is returned as an IANA encoding name, such as ISO-8859-1, which is then converted to the corresponding Python codec name. If the file contains no file encoding, the locale's preferred encoding is returned""" preferredEncoding = locale.getpreferredencoding() try: pszEncoding = create_string_buffer(20) # is 20 enough?? 
func = self.spssio.spssGetFileEncoding retcode = func(c_int(self.fh), byref(pszEncoding)) if retcode > 0: raise SPSSIOError("Error getting file encoding", retcode) iana_codes = encodings.aliases.aliases rawEncoding = pszEncoding.value.lower() if rawEncoding.replace("-", "") in iana_codes: iana_code = rawEncoding.replace("-", "") else: iana_code = rawEncoding.replace("-", "_") fileEncoding = iana_codes[iana_code] return fileEncoding except AttributeError: print ("NOTE. Function 'getFileEncoding' not found. You are " + "using a .dll from SPSS < v16.") return preferredEncoding except KeyError: print ("NOTE. IANA coding lookup error.
as 'LO_yYYYYmMMdDD.nc' :arg str bc_dir: the directory in which to save the results. :arg str LO_dir: the directory in which Live Ocean results are stored. :arg str NEMO_BC: path to an example NEMO boundary condition file for loading boundary info. """ # Create metadeta for temperature and salinity var_meta = {'vosaline': {'grid': 'SalishSea2', 'long_name': 'Practical Salinity', 'units': 'psu'}, 'votemper': {'grid': 'SalishSea2', 'long_name': 'Potential Temperature', 'units': 'deg C'} } # Mapping from LiveOcean TS names to NEMO TS names LO_to_NEMO_var_map = {'salt': 'vosaline', 'temp': 'votemper'} # Initialize var_arrays dict NEMO_var_arrays = {key: [] for key in LO_to_NEMO_var_map.values()} # Load BC information depBC, lonBC, latBC, shape = load_SalishSea_boundary_grid(fname=NEMO_BC) # Load and interpolate Live Ocean if not nowcast: files = _list_LO_time_series_files(start, end, LO_dir) save_dir = bc_dir else: print('Preparing 72 hours of Live Ocean results.' 'Argument end = {} is ignored'.format(end)) files = _list_LO_files_for_nowcast(start, LO_dir) save_dir = os.path.join(bc_dir, start) if not os.path.isdir(save_dir): os.mkdir(save_dir) LO_dataset = load_LiveOcean(files, resample_interval=avg_period) depth_interps = interpolate_to_NEMO_depths(LO_dataset, depBC, ['salt', 'temp']) lateral_interps = interpolate_to_NEMO_lateral(depth_interps, LO_dataset, lonBC, latBC, shape) lateral_interps['ocean_time'] = LO_dataset.ocean_time # convert to TEOS-10 if necessary if teos_10: var_meta, lateral_interps['salt'], lateral_interps['temp'] = \ _convert_TS_to_TEOS10(var_meta, lateral_interps['salt'], lateral_interps['temp']) # divide up data and save into separate files _separate_and_save_files(lateral_interps, avg_period, file_frequency, basename, save_dir, LO_to_NEMO_var_map, var_meta, NEMO_var_arrays, NEMO_BC) # make time_counter the record dimension using ncks and compress files = glob.glob(os.path.join(save_dir, '*.nc')) for f in files: cmd = ['ncks', 
'--mk_rec_dmn=time_counter', '-O', f, f] sp.call(cmd) cmd = ['ncks', '-4', '-L4', '-O', f, f] sp.call(cmd) # move files around if nowcast: _relocate_files_for_nowcast(start, save_dir, basename, bc_dir) def _relocate_files_for_nowcast(start_date, save_dir, basename, bc_dir): """Organize the files for use in the nowcast framework. Orginally, files are save in bc_dir/start/basename_y...nc For the nowcast system we want file start_date+1 in bc_dir and start_date+2 in bc_dir/fcst :arg str start_date: the start_date of the LO simulation in format %Y-%m-%d :arg str save_dir: the directory where the boundary files are orginally saved. Should be bc_dir/start_date/.. :arg str basename: The basename of the boundary files, e.g. LO :arg str bc_dir: The directory to save the bc files. """ rundate = datetime.datetime.strptime(start_date, '%Y-%m-%d') for d, subdir in zip([1, 2], ['', 'fcst']): next_date = rundate + datetime.timedelta(days=d) d_file = os.path.join( save_dir, '{}_{}.nc'.format(basename, next_date.strftime('y%Ym%md%d') ) ) if os.path.isfile(d_file): os.rename(d_file, os.path.join(bc_dir, subdir, os.path.basename(d_file))) if not os.listdir(save_dir): os.rmdir(save_dir) def _list_LO_time_series_files(start, end, LO_dir): """ List the Live Ocean files in a given date range [start, end]. LO nowcast files that form a time series are used. Note: If start='2016-06-01' and end= '2016-06-02' results will be a list starting with LO_dir/2016-05-31/ocean_his_0025_UBC.nc and ending with LO_dir/2016-06-02/ocean_his_0024_UBC.nc. The times in these files represent 2016-06-01 00:00:00 to 2016-06-02 23:00:00. 
:arg str start: start date in format 'yyyy-mm-dd' :arg str end: end date in format 'yyyy-mm-dd :arg str LO_dir: the file path where Live Ocean results are stored :returns: list of Live Ocean file names """ sdt = (datetime.datetime.strptime(start, '%Y-%m-%d') - datetime.timedelta(days=1)) edt = datetime.datetime.strptime(end, '%Y-%m-%d') sstr = os.path.join( LO_dir, '{}/ocean_his_0025_UBC.nc'.format(sdt.strftime('%Y%m%d'))) estr = os.path.join( LO_dir, '{}/ocean_his_0024_UBC.nc'.format(edt.strftime('%Y%m%d'))) allfiles = glob.glob(os.path.join(LO_dir, '*/*UBC.nc')) files = [] for filename in allfiles: if filename >= sstr and filename <= estr: files.append(filename) # remove files outside of first 24hours for each day regex = re.compile(r'_00[3-7][0-9]|_002[6-9]') keeps = [x for x in files if not regex.search(x)] keeps.sort() return keeps def _list_LO_files_for_nowcast(rundate, LO_dir): """ List 48 hours of Live Ocean files that began on rundate. Used for creation of nowcast system boundary conditions. Each Live Ocean run date contains 72 hours. This funtcion returns the files that represent hours 23 through 71. Example: if rundate='2016-06-01' the listed files will be LO_dir/20160601/ocean_his_0025_UBC.nc to LO_dir/20160601/ocean_his_0072_UBC.nc The times in these files represent 2016-06-02 00:00:00 to 2016-06-03 23:00:00. 
:arg str rundate: The Live Ocean rundate in format 'yyyy-mm-dd' :arg str LO_dir: the file path where Live Ocean results are stored :returns: list of Live Ocean file names """ sdt = datetime.datetime.strptime(rundate, '%Y-%m-%d') allfiles = glob.glob(os.path.join(LO_dir, sdt.strftime('%Y%m%d'), '*.nc')) start_str = 'ocean_his_0025_UBC.nc' end_str = 'ocean_his_0072_UBC.nc' files_return = [] for filename in allfiles: if os.path.basename(filename) >= start_str: if os.path.basename(filename) <= end_str: files_return.append(filename) files_return.sort(key=os.path.basename) return files_return def _separate_and_save_files(interpolated_data, avg_period, file_frequency, basename, save_dir, LO_to_NEMO_var_map, var_meta, NEMO_var_arrays, NEMO_BC_file): """Separates and saves variables in interpolated_data into netCDF files given a desired file frequency. :arg interpolated_data: a dictionary containing variable arrays and time. Keys are LO variable names. :type interpolated_data: dictionary of numpy arrays for varables and an xarray dataarray for time. :arg str avg_period: The averaging period for the forcing files. options are '1H' for hourly, '1D' for daily, '7D' for weekly, '1M' for monthly :arg str file_frequency: The frequency by which the files will be saved. Options are: * 'yearly' files that contain a year of data and look like *_yYYYY.nc * 'monthly' for files that contain a month of data and look like *_yYYYYmMM.nc * 'daily' for files that contain a day of data and look like *_yYYYYmMMdDD.nc where * is the basename. :arg str basename: the base name of the saved files. Eg. basename='LO', file_frequency='daily' saves files as 'LO_yYYYYmMMdDD.nc' :arg str save_dir: the directory in which to save the results :arg LO_to_NEMO_var_map: a dictionary mapping between LO variable names (keys) and NEMO variable names (values) :type LO_to_NEMO_var_map: a dictionary with string key-value pairs :arg var_meta: metadata for each variable in var_arrays. Keys are NEMO variable names. 
:type var_meta: a dictionary of dictionaries with key-value pairs of metadata :arg NEMO_var_arrays: a dictionary containing the boundary data to be saved. :type NEMO_var_arrays: dictionary of numpy arrays :arg str NEMO_BC_file: path to an example NEMO boundary condition file for loading boundary info. """ time_units = {'1H': 'hours', '1D': 'days', '7D': 'weeks', '1M': 'months'} index = 0 first = datetime.datetime.strptime( str(interpolated_data['ocean_time'].values[0])[0:-3], '%Y-%m-%dT%H:%M:%S.%f' ) # I don't really like method of retrieving the date from LO results. # Is it necessary? . first = first.replace(second=0, microsecond=0) for counter, t in enumerate(interpolated_data['ocean_time']): date = datetime.datetime.strptime(str(t.values)[0:-3], '%Y-%m-%dT%H:%M:%S.%f') conditions = {'yearly': date.year != first.year, 'monthly': date.month != first.month, # above doesn't work if same months, different year... 'daily': date.date() != first.date() } filenames = { 'yearly': os.path.join(save_dir, '{}_y{}.nc'.format(basename, first.year) ), 'monthly': os.path.join(save_dir, '{}_y{}m{:02d}.nc'.format(basename, first.year, first.month) ), 'daily': os.path.join(save_dir, '{}_y{}m{:02d}d{:02d}.nc'.format(basename, first.year, first.month, first.day) ) } if conditions[file_frequency]: for LO_name, NEMO_name in LO_to_NEMO_var_map.items(): NEMO_var_arrays[NEMO_name] = \ interpolated_data[LO_name][index:counter, :, :, :] _create_sub_file(first, time_units[avg_period], NEMO_var_arrays, var_meta, NEMO_BC_file, filenames[file_frequency]) first = date index = counter elif counter == interpolated_data['ocean_time'].values.shape[0]-1: for LO_name, NEMO_name in LO_to_NEMO_var_map.items(): NEMO_var_arrays[NEMO_name] = \ interpolated_data[LO_name][index:, :, :, :] _create_sub_file(first, time_units[avg_period], NEMO_var_arrays, var_meta, NEMO_BC_file, filenames[file_frequency]) def _create_sub_file(date, time_unit, var_arrays, var_meta, NEMO_BC, filename): """Save a netCDF file for 
boundary data stored in var_arrays. :arg date: Date from which time in var_arrays is measured. :type date: datetime object :arg str time_unit: Units that time in var_arrays is measured in. e.g 'days' or 'weeks' or 'hours' :arg var_arrays: a dictionary containing the boundary data to be saved. :type var_arrays: dictionary of numpy arrays :arg var_meta: metadata for each variable in var_arrays :type var_meta: a dictionary of dictionaries with key-value pairs of metadata :arg str NEMO_BC: path to a current NEMO boundary file. Used for looking up boundary indices etc. :arg str filename: The name of the file to be saved. """ # Set up xarray Dataset ds = xr.Dataset() # Load BC information f = nc.Dataset(NEMO_BC) depBC = f.variables['deptht'] # Copy variables and attributes of non-time dependent variables # from a previous BC file keys = list(f.variables.keys()) for var_name in var_arrays: if var_name in keys: # check that var_name can be removed keys.remove(var_name) keys.remove('time_counter') # Allow xarray to build these arrays keys.remove('deptht') # Now iterate through remaining variables in old BC file and add to dataset for key in keys: var = f.variables[key] temp_array = xr.DataArray(var, name=key, dims=list(var.dimensions), attrs={att: var.getncattr(att) for att in var.ncattrs()} ) ds = xr.merge([ds, temp_array]) # Add better units information nbidta etc # for varname in ['nbidta', 'nbjdta', 'nbrdta']: # ds[varname].attrs['units'] = 'index' # Now add the time-dependent model variables for var_name, var_array in var_arrays.items(): data_array = xr.DataArray(var_array, name=var_name,
import numpy as np from scipy.io.idl import readsav from scipy.interpolate import interp1d import h5py from astropy.io import fits import shutil import glob import os def dimensions(instrument): if instrument == 'HARPS': M = 4096 # pixels per order R = 72 # orders elif instrument == 'HARPS-N': M = 4096 # pixels per order R = 69 # orders else: print("instrument not recognized. valid options are: HARPS, HARPS-N") assert False return M, R def read_spec_2d(spec_file, blaze=False, flat=False): '''Read a HARPS 2D spectrum file from the ESO pipeline Parameters ---------- spec_file : string name of the fits file with the data (e2ds format) blaze : boolean if True, then divide out the blaze function from flux flat : boolean if True, then divide out the flatfield from flux Returns ------- wave : np.ndarray (shape n_orders x 4096) wavelength (in Angstroms) flux : np.ndarray (shape n_orders x 4096) flux value ''' path = spec_file[0:str.rfind(spec_file,'/')+1] sp = fits.open(spec_file) header = sp[0].header flux = sp[0].data try: wave_file = header['HIERARCH ESO DRS CAL TH FILE'] except KeyError: # HARPS-N wave_file = header['HIERARCH TNG DRS CAL TH FILE'] wave_file = str.replace(wave_file, 'e2ds', 'wave') # just in case of header mistake.. # ex. 
HARPS.2013-03-13T09:20:00.346_ccf_M2_A.fits try: ww = fits.open(path+wave_file) wave = ww[0].data except: print("Wavelength solution file {0} not found!".format(wave_file)) return if blaze: blaze_file = header['HIERARCH ESO DRS BLAZE FILE'] bl = fits.open(path+blaze_file) blaze = bl[0].data flux /= blaze if flat: flat_file = header['HIERARCH ESO DRS CAL FLAT FILE'] fl = fits.open(path+flat_file) flat = fl[0].data flux /= flat return wave, flux def read_snr(filename, instrument='HARPS'): '''Parse SNR from header of a HARPS(-S or -N) file from the ESO or TNG pipelines Parameters ---------- filename : string name of the fits file with the data (can be ccf, e2ds, s1d) Returns ------- snr : np.ndarray SNR values taken near the center of each order ''' sp = fits.open(filename) header = sp[0].header if instrument=='HARPS': n_orders = 72 elif instrument=='HARPS-N': n_orders = 69 else: print("ERROR: instrument {0} not recognized.".format(instrument)) return snr = np.arange(n_orders, dtype=np.float) for i in np.nditer(snr, op_flags=['readwrite']): if instrument=='HARPS': i[...] = header['HIERARCH ESO DRS SPE EXT SN{0}'.format(str(int(i)))] elif instrument=='HARPS-N': i[...] = header['HIERARCH TNG DRS SPE EXT SN{0}'.format(str(int(i)))] return snr def read_data_from_fits(filelist, instrument='HARPS', e2ds=False): '''Parses a list of HARPS CCF files. Parameters ---------- filelist : list of strings list of filenames for HARPS (or HARPS-N) CCF files Returns ------- data : list of numpy arrays flux values in format [(N_epochs, M_pixels) for r in R_orders]. note that the echelle orders may be of different pixel lengths, but all epochs must be consistent. ivars : list of numpy arrays Inverse variance errors on data in the same format. xs : list of numpy arrays Wavelength values for each pixel, in the same format as data. pipeline_rvs : numpy array N_epoch length array of RVs estimated by the HARPS pipeline. These RVs are drift-corrected but NOT barycentric corrected. 
    pipeline_sigmas : numpy array
        N_epoch length array of error estimates on HARPS pipeline RVs.
    dates : numpy array
        N_epoch length array of observation times.
    bervs : numpy array
        N_epoch length array of barycentric RVs.
    airms : numpy array
        N_epoch length array of airmass.
    drifts : numpy array
        N_epoch length array of instrumental drifts.
    '''
    N = len(filelist)  # number of epochs
    M, R = dimensions(instrument)
    # Per-order storage; epochs that fail to load are tracked in `empty`
    # and removed at the end so all returned arrays stay aligned.
    data = [np.zeros((N,M)) for r in range(R)]
    ivars = [np.zeros((N,M)) for r in range(R)]
    xs = [np.zeros((N,M)) for r in range(R)]
    empty = np.array([], dtype=int)
    pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)
    for n,f in enumerate(filelist):
        sp = fits.open(f)
        if not e2ds:
            # CCF files carry pipeline RV/drift keywords; e2ds files do not.
            try:
                if instrument == 'HARPS':
                    pipeline_rvs[n] = sp[0].header['HIERARCH ESO DRS CCF RVC'] * 1.e3  # m/s
                    pipeline_sigmas[n] = sp[0].header['HIERARCH ESO DRS CCF NOISE'] * 1.e3  # m/s
                    drifts[n] = sp[0].header['HIERARCH ESO DRS DRIFT SPE RV']
                elif instrument == 'HARPS-N':
                    pipeline_rvs[n] = sp[0].header['HIERARCH TNG DRS CCF RVC'] * 1.e3  # m/s
                    pipeline_sigmas[n] = sp[0].header['HIERARCH TNG DRS CCF NOISE'] * 1.e3  # m/s
                    drifts[n] = sp[0].header['HIERARCH TNG DRS DRIFT RV USED']
            except KeyError:
                print("WARNING: {0} does not appear to be a stellar CCF file. Skipping this one.".format(f))
                empty = np.append(empty, n)
                continue
        if instrument == 'HARPS':
            dates[n] = sp[0].header['HIERARCH ESO DRS BJD']
            bervs[n] = sp[0].header['HIERARCH ESO DRS BERV'] * 1.e3  # m/s
            airms[n] = sp[0].header['HIERARCH ESO TEL AIRM START']
        elif instrument == 'HARPS-N':
            dates[n] = sp[0].header['HIERARCH TNG DRS BJD']
            bervs[n] = sp[0].header['HIERARCH TNG DRS BERV'] * 1.e3  # m/s
            airms[n] = sp[0].header['AIRMASS']
        # Derive the matching e2ds filename from the CCF filename
        # (CCF names embed the correlation mask: G2, M2 or K5).
        spec_file = str.replace(f, 'ccf_G2', 'e2ds')
        spec_file = str.replace(spec_file, 'ccf_M2', 'e2ds')
        spec_file = str.replace(spec_file, 'ccf_K5', 'e2ds')
        try:
            wave, spec = read_spec_2d(spec_file)
        except:
            empty = np.append(empty, n)
            continue
        snrs = read_snr(f, instrument=instrument)  # HACK
        # save stuff
        for r in range(R):
            data[r][n,:] = spec[r,:]
            ivars[r][n,:] = snrs[r]**2/spec[r,:]/np.nanmean(spec[r,:])  # scaling hack
            xs[r][n,:] = wave[r,:]
    # delete data without wavelength solutions:
    for r in range(R):
        data[r] = np.delete(data[r], empty, axis=0)
        ivars[r] = np.delete(ivars[r], empty, axis=0)
        xs[r] = np.delete(xs[r], empty, axis=0)
    pipeline_rvs = np.delete(pipeline_rvs, empty)
    pipeline_sigmas = np.delete(pipeline_sigmas, empty)
    dates = np.delete(dates, empty)
    bervs = np.delete(bervs, empty)
    airms = np.delete(airms, empty)
    drifts = np.delete(drifts, empty)
    # re-introduce BERVs to HARPS results:
    pipeline_rvs -= bervs
    pipeline_rvs -= np.mean(pipeline_rvs)
    return data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts


def savfile_to_filelist(savfile, destination_dir='../data/'):
    # copies CCF + E2DS files to destination_dir and returns a list of the CCFs
    # MB personal use only - I have a lot of old IDL files!
    s = readsav(savfile)
    filelist = []
    # IDL saves store filenames as bytes; decode before using as paths.
    files = [f.decode('utf8') for f in s.files]
    for f in files:
        shutil.copy2(f, destination_dir)
        # Copy the matching e2ds spectrum alongside each CCF file.
        spec_file = str.replace(f, 'ccf_G2', 'e2ds')
        shutil.copy2(spec_file, destination_dir)
        basename = f[str.rfind(f,'/')+1:]
        filelist = np.append(filelist, destination_dir+basename)
    return filelist


def missing_wavelength_files(filelist):
    # loop through files and make sure that their wavelength solutions exist
    # return list of all missing wavelength solution files
    missing_files = []
    for f in filelist:
        path = f[0:str.rfind(f,'/')+1]
        sp = fits.open(f)
        header = sp[0].header
        wave_file = header['HIERARCH ESO DRS CAL TH FILE']
        if os.path.isfile(path+wave_file):
            continue
        else:
            missing_files = np.append(missing_files, wave_file)
    # de-duplicate: several spectra may reference the same nightly solution
    return np.unique(missing_files)


def write_data(data, ivars, xs, pipeline_rvs, pipeline_sigmas, dates, bervs, airms, drifts, filenames, hdffile):
    '''Write processed HARPS data to HDF5 file.

    Note that currently all input parameters are required,
    but the following ones can be populated with zeros
    if you don't have them:
        pipeline_rvs, pipeline_sigmas, bervs, drifts, filenames
    These parameters *are* strictly required:
        data, ivars, xs, dates, airms
    And bervs is strongly recommended as they are used to
    initialize any stellar RVs.

    Parameters
    ----------
    data : list of numpy arrays
        flux values in format [(N_epochs, M_pixels) for r in R_orders].
        note that the echelle orders may be of different pixel lengths,
        but all epochs must be consistent.
    ivars : list of numpy arrays
        Inverse variance errors on data in the same format.
    xs : list of numpy arrays
        Wavelength values for each pixel, in the same format as data.
    pipeline_rvs : numpy array
        N_epoch length array of RVs estimated by the HARPS pipeline.
        These RVs are drift-corrected but NOT barycentric corrected.
    pipeline_sigmas : numpy array
        N_epoch length array of error estimates on HARPS pipeline RVs.
    dates : numpy array
        N_epoch length array of observation times.
    bervs : numpy array
        N_epoch length array of barycentric RVs.
    airms : numpy array
        N_epoch length array of airmass.
    drifts : numpy array
        N_epoch length array of instrumental drifts.
    filenames : list or numpy array
        N_epoch length list of data files.
    hdffile : string
        Filename to write to.
    '''
    h = h5py.File(hdffile, 'w')
    dset = h.create_dataset('data', data=data)
    dset = h.create_dataset('ivars', data=ivars)
    dset = h.create_dataset('xs', data=xs)
    dset = h.create_dataset('pipeline_rvs', data=pipeline_rvs)
    dset = h.create_dataset('pipeline_sigmas', data=pipeline_sigmas)
    dset = h.create_dataset('dates', data=dates)
    dset = h.create_dataset('bervs', data=bervs)
    dset = h.create_dataset('airms', data=airms)
<gh_stars>10-100 # KVM-based Discoverable Cloudlet (KD-Cloudlet) # Copyright (c) 2015 Carnegie Mellon University. # All Rights Reserved. # # THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS. # # Released under a modified BSD license, please see license.txt for full terms. # DM-0002138 # # KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses: # MiniMongo # Copyright (c) 2010-2014, <NAME> # All rights reserved. Released under BSD license. # https://github.com/MiniMongo/minimongo/blob/master/LICENSE # # Bootstrap # Copyright (c) 2011-2015 Twitter, Inc. # Released under the MIT License # https://github.com/twbs/bootstrap/blob/master/LICENSE # # jQuery JavaScript Library v1.11.0 # http://jquery.com/ # Includes Sizzle.js # http://sizzlejs.com/ # Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors # Released under the MIT license # http://jquery.org/license import time import os import json # Used to generate unique IDs for the VMs. 
from uuid import uuid4 from pycloud.pycloud.utils.netutils import generate_random_mac, find_ip_for_mac, is_port_open, get_adapter_ip_address from pycloud.pycloud.mongo import Model from pycloud.pycloud.model.vmimage import VMImage from pycloud.pycloud.vm.vmsavedstate import VMSavedState from pycloud.pycloud.vm.virtualmachinedescriptor import VirtualMachineDescriptor from pycloud.pycloud.vm.vmutils import VirtualMachineException from pycloud.pycloud.utils import portmanager from pycloud.pycloud.cloudlet import get_cloudlet_instance from pycloud.pycloud.vm.vmutils import VirtualMachine from pycloud.pycloud.network import cloudlet_dns ################################################################################################################ # ################################################################################################################ class SVMNotFoundException(Exception): def __init__(self, message): super(SVMNotFoundException, self).__init__(message) self.message = message ################################################################################################################ # Represents a runtime ServiceVM, independent on whether it has a cloned or original disk image. ################################################################################################################ class ServiceVM(Model): # Meta class is needed so that minimongo can map this class onto the database. class Meta: collection = "service_vms" external = ['_id', 'service_id', 'running', 'port', 'ip_address', 'vnc_address', 'ssh_port', 'fqdn'] mapping = { 'vm_image': VMImage } # Constants. SSH_INTERNAL_PORT = 22 VM_NAME_PREFIX = 'VM' ################################################################################################################ # Constructor. 
################################################################################################################ def __init__(self, *args, **kwargs): self._id = None self.name = None self.vm = VirtualMachine() self.vm_image = None self.os = 'lin' # By default, used when creating a new SVM only. self.port_mappings = {} self.service_port = None self.port = None # Used to show the external port self.ssh_port = None self.vnc_address = None self.vnc_port = None self.service_id = None self.ip_address = None self.mac_address = None self.running = False self.ready = False self.fqdn = None self.network_mode = None self.adapter = None self.num_current_users = 0 super(ServiceVM, self).__init__(*args, **kwargs) ################################################################################################################ # Finds all SVMs given some search criteria. ################################################################################################################ @staticmethod def find_all(search_dict={}, only_find_ready_ones=True, connect_to_vm=True): service_vms_array = [] if only_find_ready_ones: search_dict['ready'] = True svm_list = ServiceVM.find(search_dict) for service_vm in svm_list: if connect_to_vm: service_vm.connect_to_vm() else: service_vm.vm = None service_vms_array.append(service_vm) return service_vms_array ################################################################################################################ # Locate a ServiceVM by its ID ################################################################################################################ # noinspection PyBroadException @staticmethod def by_id(svm_id=None, only_find_ready_ones=True): try: search_dict = {'_id': svm_id} if only_find_ready_ones: search_dict['ready'] = True service_vm = ServiceVM.find_one(search_dict) except: return None if service_vm: service_vm.connect_to_vm() return service_vm 
################################################################################################################ # Connects to a libvirt vm. ################################################################################################################ def connect_to_vm(self): self.vm = VirtualMachine() try: self.vm.connect_to_virtual_machine(self._id) except VirtualMachineException as e: print 'Error connecting to VM with id {}: {}'.format(self._id, e.message) self.vm = None ################################################################################################################ # ################################################################################################################ @staticmethod def by_service(service_id): service_vms = ServiceVM.find_all({'service_id': service_id}) return service_vms ################################################################################################################ # Cleanly and safely gets a ServiceVM and removes it from the database. ################################################################################################################ @staticmethod def find_and_remove(svm_id): # Find the right service and remove it. find_and_modify will only return the document with matching id return ServiceVM.find_and_modify(query={'_id': svm_id}, remove=True) ################################################################################################################ # Overridden method to avoid trying to store the VM object into the db. ################################################################################################################ def save(self, *args, **kwargs): vm = self.vm self.vm = None super(ServiceVM, self).save(*args, **kwargs) self.vm = vm ################################################################################################################ # Serializes the object safely into a json string. 
################################################################################################################ def to_json_string(self): vm = self.vm self.vm = None json_string = json.dumps(self) self.vm = vm return json_string ################################################################################################################ # Sets the name based on the space in the XML string. ################################################################################################################ def set_default_name(self, xml_string=None): self.name = None default_new_name = self.VM_NAME_PREFIX + '-' + self._id if not xml_string: self.name = default_new_name else: original_name = VirtualMachineDescriptor.get_raw_name(xml_string) if VirtualMachineDescriptor.does_name_fit(xml_string, default_new_name): new_name = default_new_name else: print 'Truncating new VM name.' new_name = default_new_name[:len(original_name)] self.name = new_name print 'Original VM Name: {}'.format(original_name) print 'New VM Name: {}'.format(self.name) if not self.name: self.name = '' ################################################################################################################ # Generates a random ID, valid as a VM id. ################################################################################################################ def generate_random_id(self): self._id = str(uuid4()) ################################################################################################################ # Create a new service VM from a given template, and start it. ################################################################################################################ def create(self, vm_xml_template_file): # Check that the XML description file exists. if not os.path.exists(vm_xml_template_file): raise VirtualMachineException("VM description file %s for VM creation does not exist." % vm_xml_template_file) # Setup network params. 
self.setup_network() # Load the XML template and update it with this VM's information. template_xml_descriptor = open(vm_xml_template_file, "r").read() self.set_default_name() updated_xml_descriptor = self._update_descriptor(template_xml_descriptor) # Create a VM ("domain") through the hypervisor. self._cold_boot(updated_xml_descriptor) # Ensure network is working and load network data. self.load_network_data() self.register_with_dns() return self ################################################################################################################ # Start this service VM. ################################################################################################################ def start(self): # Check if we are already running. if self.running: return self # Setup network params. self.setup_network() # Make sure the hypervisor can write to our files (since the disk image will be modified by the VM). self.vm_image.unprotect() # Get the saved state and make sure it is populated saved_state = VMSavedState(self.vm_image.state_image) # Update the state image with the updated descriptor. # NOTE: this is only needed since libvirt wont allow us to change the ID of a VM being restored through its API. # Instead, we trick it by manually changing the ID of the saved state file, so the API won't know we changed it. raw_saved_xml_descriptor = saved_state.getRawStoredVmDescription() self.set_default_name(raw_saved_xml_descriptor) updated_xml_descriptor_id_only = VirtualMachineDescriptor.update_raw_name_and_id(raw_saved_xml_descriptor, self._id, self.name) saved_state.updateStoredVmDescription(updated_xml_descriptor_id_only) # Get the descriptor and update it to include the current disk image path, port mappings, etc. saved_xml_descriptor = saved_state.getStoredVmDescription() updated_xml_descriptor = self._update_descriptor(saved_xml_descriptor) # Restore a VM to the state indicated in the associated memory image file, in running mode. 
# The XML descriptor is given since some things need to be changed for the instance, mainly the disk image file and the mapped ports. try: print "Resuming from VM image..." VirtualMachine.restore_saved_vm(saved_state.savedStateFilename, updated_xml_descriptor) self.vm.connect_to_virtual_machine(self._id) print "Resumed from VM image." self.running = True self.ready = True except VirtualMachineException as e: # If we could not resume the VM, discard the memory state and try to boot the VM from scratch. print "Error resuming VM: %s for VM; error is: %s" % (str(self._id), str(e)) print "Discarding saved state and attempting to cold boot VM." # Simply try creating a new VM with the same disk and the updated XML descriptor from the saved state file. self._cold_boot(updated_xml_descriptor) # Ensure network is working and load network data. self.load_network_data() self.register_with_dns() # Check if the service is available, wait for it for a bit. # CURRENT IMPLEMENTATION ONLY WORKS IN BRIDGED MODE. if self.network_mode == "bridged": self._check_service() return self ################################################################################################################ # Updates an XML containing the description of the VM with the current info of this VM. ################################################################################################################ def _update_descriptor(self, saved_xml_descriptor): # Get the descriptor and inflate it to something we can work with. xml_descriptor = VirtualMachineDescriptor(saved_xml_descriptor) # Change the ID and Name (note: not currently that useful since they are changed in the saved state file). xml_descriptor.setUuid(self._id) xml_descriptor.setName(self.name) # Set the disk image in the description of the VM. xml_descriptor.setDiskImage(self.vm_image.disk_image, 'qcow2') # Disabling remote VNC access for now. xml_descriptor.enableLocalVNC() # Sets the Realtek network driver, needed for Windows-based VMs. 
if self.os != "lin": print "Setting Realtek network driver." xml_descriptor.setRealtekNetworkDriver() # Configure bridged mode if enabled if self.network_mode == "bridged": print 'Setting bridged mode' xml_descriptor.enableBridgedMode(self.adapter) # In bridge mode we need a new MAC in case we are a clone. print 'Setting mac address \'%s\'' % self.mac_address xml_descriptor.setMACAddress(self.mac_address) # Set external ports same as internal ones. self.port = self.service_port self.ssh_port = self.SSH_INTERNAL_PORT else: # No bridge mode, means we have to setup port forwarding. # Ensure we are not using bridged mode. xml_descriptor.enableNonBridgedMode(self.adapter) # Create a new port if we do not have an external port already. print 'Setting up port forwarding' if not self.port: self._add_port_mapping(portmanager.PortManager.generate_random_available_port(), self.service_port) if not self.ssh_port: self._add_port_mapping(portmanager.PortManager.generate_random_available_port(), self.SSH_INTERNAL_PORT) xml_descriptor.setPortRedirection(self.port_mappings) # Remove seclabel item. xml_descriptor.removeSecLabel() # Get the resulting XML string and return it.
lib.einsum('ijab,iabj', t2_new, eris_ovvo,optimize=True) del t2_new return e_mp def contract_ladder(myadc,t_amp,vvvv): log = logger.Logger(myadc.stdout, myadc.verbose) nocc = myadc._nocc nvir = myadc._nvir t_amp = np.ascontiguousarray(t_amp.reshape(nocc*nocc,nvir*nvir).T) t = np.zeros((nvir,nvir, nocc*nocc)) chnk_size = radc_ao2mo.calculate_chunk_size(myadc) a = 0 if isinstance(vvvv, list): for dataset in vvvv: k = dataset.shape[0] dataset = dataset[:].reshape(-1,nvir*nvir) t[a:a+k] = np.dot(dataset,t_amp).reshape(-1,nvir,nocc*nocc) a += k elif getattr(myadc, 'with_df', None): for p in range(0,nvir,chnk_size): vvvv_p = dfadc.get_vvvv_df(myadc, vvvv, p, chnk_size) k = vvvv_p.shape[0] vvvv_p = vvvv_p.reshape(-1,nvir*nvir) t[a:a+k] = np.dot(vvvv_p,t_amp).reshape(-1,nvir,nocc*nocc) del vvvv_p a += k else : raise Exception("Unknown vvvv type") del t_amp t = np.ascontiguousarray(t.transpose(2,0,1)).reshape(nocc, nocc, nvir, nvir) return t def density_matrix(myadc, T=None): if T is None: T = RADCIP(myadc).get_trans_moments() nocc = myadc._nocc nvir = myadc._nvir n_singles = nocc n_doubles = nvir * nocc * nocc ij_ind = np.tril_indices(nocc, k=-1) s1 = 0 f1 = n_singles s2 = f1 f2 = s2 + n_doubles T_doubles = T[:,n_singles:] T_doubles = T_doubles.reshape(-1,nvir,nocc,nocc) T_doubles_transpose = T_doubles.transpose(0,1,3,2).copy() T_bab = (2/3)*T_doubles + (1/3)*T_doubles_transpose T_aaa = T_bab - T_bab.transpose(0,1,3,2) T_a = T[:,s1:f1] T_bab = T_bab.reshape(-1,n_doubles) T_aaa = T_aaa.reshape(-1,n_doubles) dm = 2 * np.dot(T_a,T_a.T) + np.dot(T_aaa, T_aaa.T) + 2 * np.dot(T_bab, T_bab.T) return dm def analyze(myadc): str = ("\n*************************************************************" "\n Eigenvector analysis summary" "\n*************************************************************") logger.info(myadc, str) myadc.analyze_eigenvector() if myadc.compute_properties: str = ("\n*************************************************************" "\n Spectroscopic factors analysis 
summary" "\n*************************************************************") logger.info(myadc, str) myadc.analyze_spec_factor() def compute_dyson_mo(myadc): X = myadc.X if X is None: nroots = myadc.U.shape[1] P,X = myadc.get_properties(nroots) nroots = X.shape[1] dyson_mo = np.dot(myadc.mo_coeff,X) return dyson_mo class RADC(lib.StreamObject): '''Ground state calculations Attributes: verbose : int Print level. Default value equals to :class:`Mole.verbose` max_memory : float or int Allowed memory in MB. Default value equals to :class:`Mole.max_memory` incore_complete : bool Avoid all I/O. Default is False. method : string nth-order ADC method. Options are : ADC(2), ADC(2)-X, ADC(3). Default is ADC(2). >>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz') >>> mf = scf.RHF(mol).run() >>> myadc = adc.RADC(mf).run() Saved results e_corr : float MPn correlation correction e_tot : float Total energy (HF + correlation) t1, t2 : T amplitudes t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt) ''' incore_complete = getattr(__config__, 'adc_radc_RADC_incore_complete', False) async_io = getattr(__config__, 'adc_radc_RADC_async_io', True) blkmin = getattr(__config__, 'adc_radc_RADC_blkmin', 4) memorymin = getattr(__config__, 'adc_radc_RADC_memorymin', 2000) def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None): from pyscf import gto if 'dft' in str(mf.__module__): raise NotImplementedError('DFT reference for UADC') if mo_coeff is None: mo_coeff = mf.mo_coeff if mo_occ is None: mo_occ = mf.mo_occ self.mol = mf.mol self._scf = mf self.verbose = self.mol.verbose self.stdout = self.mol.stdout self.max_memory = mf.max_memory self.max_space = getattr(__config__, 'adc_radc_RADC_max_space', 12) self.max_cycle = getattr(__config__, 'adc_radc_RADC_max_cycle', 50) self.conv_tol = getattr(__config__, 'adc_radc_RADC_conv_tol', 1e-12) self.tol_residual = getattr(__config__, 'adc_radc_RADC_tol_res', 1e-6) self.scf_energy = mf.e_tot self.frozen = frozen self.incore_complete = 
self.incore_complete or self.mol.incore_anyway self.mo_coeff = mo_coeff self.mo_occ = mo_occ self.e_corr = None self.t1 = None self.t2 = None self.imds = lambda:None self._nocc = mf.mol.nelectron//2 self._nmo = mo_coeff.shape[1] self._nvir = self._nmo - self._nocc self.mo_energy = mf.mo_energy self.chkfile = mf.chkfile self.method = "adc(2)" self.method_type = "ip" self.with_df = None self.compute_properties = True self.evec_print_tol = 0.1 self.spec_factor_print_tol = 0.1 self.E = None self.U = None self.P = None self.X = None keys = set(('tol_residual','conv_tol', 'e_corr', 'method', 'mo_coeff', 'mol', 'mo_energy', 'max_memory', 'incore_complete', 'scf_energy', 'e_tot', 't1', 'frozen', 'chkfile', 'max_space', 't2', 'mo_occ', 'max_cycle')) self._keys = set(self.__dict__.keys()).union(keys) compute_amplitudes = compute_amplitudes compute_energy = compute_energy transform_integrals = radc_ao2mo.transform_integrals_incore make_rdm1 = density_matrix def dump_flags(self, verbose=None): logger.info(self, '') logger.info(self, '******** %s ********', self.__class__) logger.info(self, 'max_space = %d', self.max_space) logger.info(self, 'max_cycle = %d', self.max_cycle) logger.info(self, 'conv_tol = %s', self.conv_tol) logger.info(self, 'max_memory %d MB (current use %d MB)', self.max_memory, lib.current_memory()[0]) return self def dump_flags_gs(self, verbose=None): logger.info(self, '') logger.info(self, '******** %s ********', self.__class__) logger.info(self, 'max_memory %d MB (current use %d MB)', self.max_memory, lib.current_memory()[0]) return self def kernel_gs(self): assert(self.mo_coeff is not None) assert(self.mo_occ is not None) self.method = self.method.lower() if self.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(self.method) if self.verbose >= logger.WARN: self.check_sanity() self.dump_flags_gs() nmo = self._nmo nao = self.mo_coeff.shape[0] nmo_pair = nmo * (nmo+1) // 2 nao_pair = nao * (nao+1) // 2 mem_incore = (max(nao_pair**2, 
nmo**4) + nmo_pair**2) * 8/1e6 mem_now = lib.current_memory()[0] if getattr(self, 'with_df', None) or getattr(self._scf, 'with_df', None): if getattr(self, 'with_df', None): self.with_df = self.with_df else : self.with_df = self._scf.with_df def df_transform(): return radc_ao2mo.transform_integrals_df(self) self.transform_integrals = df_transform elif (self._scf._eri is None or (mem_incore+mem_now >= self.max_memory and not self.incore_complete)): def outcore_transform(): return radc_ao2mo.transform_integrals_outcore(self) self.transform_integrals = outcore_transform eris = self.transform_integrals() self.e_corr, self.t1, self.t2 = compute_amplitudes_energy(self, eris=eris, verbose=self.verbose) self._finalize() return self.e_corr, self.t1, self.t2 def kernel(self, nroots=1, guess=None, eris=None): assert(self.mo_coeff is not None) assert(self.mo_occ is not None) self.method = self.method.lower() if self.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(self.method) if self.verbose >= logger.WARN: self.check_sanity() self.dump_flags_gs() nmo = self._nmo nao = self.mo_coeff.shape[0] nmo_pair = nmo * (nmo+1) // 2 nao_pair = nao * (nao+1) // 2 mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6 mem_now = lib.current_memory()[0] if getattr(self, 'with_df', None) or getattr(self._scf, 'with_df', None): if getattr(self, 'with_df', None): self.with_df = self.with_df else : self.with_df = self._scf.with_df def df_transform(): return radc_ao2mo.transform_integrals_df(self) self.transform_integrals = df_transform elif (self._scf._eri is None or (mem_incore+mem_now >= self.max_memory and not self.incore_complete)): def outcore_transform(): return radc_ao2mo.transform_integrals_outcore(self) self.transform_integrals = outcore_transform eris = self.transform_integrals() self.e_corr, self.t1, self.t2 = compute_amplitudes_energy(self, eris=eris, verbose=self.verbose) self._finalize() self.method_type = self.method_type.lower() if(self.method_type 
== "ea"): e_exc, v_exc, spec_fac, x, adc_es = self.ea_adc(nroots=nroots, guess=guess, eris=eris) elif(self.method_type == "ip"): e_exc, v_exc, spec_fac, x, adc_es = self.ip_adc(nroots=nroots, guess=guess, eris=eris) else: raise NotImplementedError(self.method_type) self._adc_es = adc_es return e_exc, v_exc, spec_fac, x def _finalize(self): '''Hook for dumping results and clearing up the object.''' logger.note(self, 'E_corr = %.8f', self.e_corr) return self def ea_adc(self, nroots=1, guess=None, eris=None): adc_es = RADCEA(self) e_exc, v_exc, spec_fac, x = adc_es.kernel(nroots, guess, eris) return e_exc, v_exc, spec_fac, x, adc_es def ip_adc(self, nroots=1, guess=None, eris=None): adc_es = RADCIP(self) e_exc, v_exc, spec_fac, x = adc_es.kernel(nroots, guess, eris) return e_exc, v_exc, spec_fac, x, adc_es def density_fit(self, auxbasis=None, with_df = None): if with_df is None: self.with_df = df.DF(self._scf.mol) self.with_df.max_memory = self.max_memory self.with_df.stdout = self.stdout self.with_df.verbose = self.verbose if auxbasis is None: self.with_df.auxbasis = self._scf.with_df.auxbasis else : self.with_df.auxbasis = auxbasis else : self.with_df = with_df return self def analyze(self): self._adc_es.analyze() def compute_dyson_mo(self): return self._adc_es.compute_dyson_mo() def get_imds_ea(adc, eris=None): cput0 = (time.clock(), time.time()) log = logger.Logger(adc.stdout, adc.verbose) if adc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(adc.method) method = adc.method t1 = adc.t1 t2 = adc.t2 t1_2 = t1[0] eris_ovvo = eris.ovvo nocc = adc._nocc nvir = adc._nvir e_occ = adc.mo_energy[:nocc].copy() e_vir = adc.mo_energy[nocc:].copy() idn_occ = np.identity(nocc) idn_vir = np.identity(nvir) if eris is None: eris = adc.transform_integrals() # a-b block # Zeroth-order terms M_ab = lib.einsum('ab,a->ab', idn_vir, e_vir) # Second-order terms t2_1 = t2[0][:] M_ab += lib.einsum('l,lmad,lmbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab -= 
lib.einsum('l,lmad,mlbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab -= lib.einsum('l,mlad,lmbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab += lib.einsum('l,mlad,mlbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab += lib.einsum('l,lmad,lmbd->ab',e_occ,t2_1, t2_1,optimize=True) M_ab += lib.einsum('l,mlad,mlbd->ab',e_occ,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,lmad,lmbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab += 0.5 * lib.einsum('d,lmad,mlbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab += 0.5 * lib.einsum('d,mlad,lmbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,mlad,mlbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,lmad,lmbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,mlad,mlbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab_t = lib.einsum('lmad,lmbd->ab', t2_1,t2_1, optimize=True) M_ab -= 1 * lib.einsum('a,ab->ab',e_vir,M_ab_t,optimize=True) M_ab -= 1 * lib.einsum('b,ab->ab',e_vir,M_ab_t,optimize=True) M_ab_t = lib.einsum('lmad,mlbd->ab', t2_1,t2_1, optimize=True) M_ab += 0.5 * lib.einsum('a,ab->ab',e_vir,M_ab_t,optimize=True) M_ab += 0.5 * lib.einsum('b,ab->ab',e_vir,M_ab_t,optimize=True) del M_ab_t M_ab -= 0.5 * lib.einsum('lmad,lbdm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('mlad,lbdm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('lmad,ldbm->ab',t2_1, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('mlad,ldbm->ab',t2_1, eris_ovvo,optimize=True) M_ab -= lib.einsum('lmad,lbdm->ab',t2_1, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('lmbd,ladm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('mlbd,ladm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('lmbd,ldam->ab',t2_1, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('mlbd,ldam->ab',t2_1, eris_ovvo,optimize=True) M_ab -= lib.einsum('lmbd,ladm->ab',t2_1, eris_ovvo,optimize=True) del t2_1 cput0 = log.timer_debug1("Completed M_ab second-order terms ADC(2) calculation", *cput0) #Third-order 
terms if(method =='adc(3)'): eris_oovv = eris.oovv eris_oooo = eris.oooo if isinstance(eris.ovvv, type(None)): chnk_size = radc_ao2mo.calculate_chunk_size(adc) else : chnk_size = nocc a = 0 for p in range(0,nocc,chnk_size): if
from __future__ import print_function import argparse import torch import torch.utils.data from torch import nn, optim from torch.autograd import Variable from torch.nn import functional as F from torchvision import datasets, transforms from torchvision.utils import save_image from torch.utils import model_zoo from torchvision import models import scipy from miscc.datasets import TextDataset from miscc.config import cfg from model import STAGE1_G, STAGE1_D, STAGE1_ImageEncoder import pdb parser = argparse.ArgumentParser(description='VAE MNIST Example') parser.add_argument('--batch-size', type=int, default=128, metavar='N', help='input batch size for training (default: 128)') parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)') parser.add_argument('--no-cuda', action='store_true', default=False, help='enables CUDA training') parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() DATA_DIR = "../data/flowers" cfg.TEXT.DIMENSION = 300 cfg.GAN.CONDITION_DIM = 128 cfg.GAN.DF_DIM = 96 cfg.GAN.GF_DIM = 192 torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} # train_loader = torch.utils.data.DataLoader( # datasets.MNIST('../data', train=True, download=True, # transform=transforms.ToTensor()), # batch_size=args.batch_size, shuffle=True, **kwargs) # test_loader = torch.utils.data.DataLoader( # datasets.MNIST('../data', train=False, transform=transforms.ToTensor()), # batch_size=args.batch_size, shuffle=True, **kwargs) image_transform = transforms.Compose([ transforms.RandomCrop(64), transforms.RandomHorizontalFlip(), transforms.ToTensor(), 
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_dataset = TextDataset(DATA_DIR, 'train', imsize=64, transform=image_transform) assert train_dataset train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, drop_last=True, shuffle=True) ###### image_transform = transforms.Compose([ transforms.RandomCrop(64), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) test_dataset = TextDataset(DATA_DIR, 'test', imsize=64, transform=image_transform) assert test_dataset test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=args.batch_size , drop_last=True, shuffle=True) #imsize = 784 imsize = 64 * 64 * 3 #imsize = 16 * 16 * 3 zsize = 300 def conv1x1(in_planes, out_planes, bias=False): "1x1 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class CNN_ENCODER(nn.Module): def __init__(self, nef): super(CNN_ENCODER, self).__init__() if cfg.TRAIN.FLAG: self.nef = nef else: self.nef = 256 # define a uniform ranker model = models.inception_v3() url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth' model.load_state_dict(model_zoo.load_url(url)) for param in model.parameters(): param.requires_grad = False print('Load pretrained model from ', url) # print(model) self.define_module(model) self.init_trainable_weights() def define_module(self, model): self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3 self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3 self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3 self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1 self.Conv2d_4a_3x3 = model.Conv2d_4a_3x3 self.Mixed_5b = model.Mixed_5b self.Mixed_5c = model.Mixed_5c self.Mixed_5d = model.Mixed_5d self.Mixed_6a = model.Mixed_6a self.Mixed_6b = model.Mixed_6b self.Mixed_6c = model.Mixed_6c self.Mixed_6d = model.Mixed_6d self.Mixed_6e = model.Mixed_6e self.Mixed_7a = model.Mixed_7a self.Mixed_7b = model.Mixed_7b self.Mixed_7c = model.Mixed_7c 
self.emb_features = conv1x1(768, self.nef) self.emb_cnn_code = nn.Linear(2048, self.nef) def init_trainable_weights(self): initrange = 0.1 self.emb_features.weight.data.uniform_(-initrange, initrange) self.emb_cnn_code.weight.data.uniform_(-initrange, initrange) def forward(self, x): features = None # --> fixed-size input: batch x 3 x 299 x 299 x = nn.Upsample(size=(299, 299), mode='bilinear')(x) # 299 x 299 x 3 x = self.Conv2d_1a_3x3(x) # 149 x 149 x 32 x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32 x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64 x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64 x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80 x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192 x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192 x = self.Mixed_5b(x) # 35 x 35 x 256 x = self.Mixed_5c(x) # 35 x 35 x 288 x = self.Mixed_5d(x) # 35 x 35 x 288 x = self.Mixed_6a(x) # 17 x 17 x 768 x = self.Mixed_6b(x) # 17 x 17 x 768 x = self.Mixed_6c(x) # 17 x 17 x 768 x = self.Mixed_6d(x) # 17 x 17 x 768 x = self.Mixed_6e(x) # 17 x 17 x 768 # image region features features = x # 17 x 17 x 768 x = self.Mixed_7a(x) # 8 x 8 x 1280 x = self.Mixed_7b(x) # 8 x 8 x 2048 x = self.Mixed_7c(x) # 8 x 8 x 2048 x = F.avg_pool2d(x, kernel_size=8) # 1 x 1 x 2048 # x = F.dropout(x, training=self.training) # 1 x 1 x 2048 x = x.view(x.size(0), -1) # 2048 # global image features cnn_code = self.emb_cnn_code(x) # 512 if features is not None: features = self.emb_features(features) return features, cnn_code class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.fc1 = nn.Linear(imsize, 400) self.fc21 = nn.Linear(400, 50) self.fc22 = nn.Linear(400, 50) self.fc3 = nn.Linear(50, 400) self.fc4 = nn.Linear(400, imsize) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.netG = STAGE1_G() # state_dict = \ # torch.load("../output/birds_stageI_2018_03_19_15_55_52/Model/netG_epoch_120.pth", # map_location=lambda storage, loc: storage) # self.netG.load_state_dict(state_dict) # 
print('Load from: ', cfg.NET_G) #self.image_encoder = CNN_ENCODER(128) self.image_encoder = STAGE1_ImageEncoder() # state_dict = \ # torch.load("../output/birds_stageI_2018_03_19_15_55_52/Model/image_encoder_epoch_120.pth", # map_location=lambda storage, loc: storage) # self.image_encoder.load_state_dict(state_dict) ndf, nef = 60, 128 self.nef = nef self.decode_lin = nn.Sequential( nn.Linear(zsize, nef * 4 * 4), nn.BatchNorm1d(nef * 4 * 4), nn.ReLU(True) ) self.decode_img = nn.Sequential( nn.ConvTranspose2d(nef, nef // 2, 4, 2, 1, bias=False), nn.BatchNorm2d(nef // 2), nn.LeakyReLU(0.2, inplace=True), nn.ConvTranspose2d(nef // 2, nef // 4, 4, 2, 1, bias=False), nn.BatchNorm2d(nef // 4), nn.LeakyReLU(0.2, inplace=True), nn.ConvTranspose2d(nef // 4, nef // 8, 4, 2, 1, bias=False), nn.BatchNorm2d(nef // 8), nn.LeakyReLU(0.2, inplace=True), nn.ConvTranspose2d(nef // 8, nef // 16, 4, 2, 1, bias=False), nn.BatchNorm2d(nef // 16), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(nef // 16, 3, 3, 1, 1), nn.Sigmoid() ) self.encode_img = nn.Sequential( nn.Conv2d(3, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), # state size. 
(ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), # state size (ndf*2) x 16 x 16 nn.Conv2d(ndf*2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), # state size (ndf*4) x 8 x 8 nn.Conv2d(ndf*4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8), # state size (ndf * 8) x 4 x 4) nn.LeakyReLU(0.2, inplace=True) #nn.MaxPool2d(2, stride=2), #nn.Linear(1024, 300) ) self.l1 = nn.Linear(480 * 4 * 4, zsize) self.l2 = nn.Linear(480 * 4 * 4, zsize) self.l = nn.Linear(128, zsize * 2) self.relu = nn.ReLU() ####### def encode(self, x): # h1 = self.relu(self.fc1(x)) # return self.fc21(h1), self.fc22(h1) #hidden = self.encode_img(x) encoded = self.image_encoder(x) fake_img_feats, fake_img_emb, fake_img_code, mu, logvar = encoded #_, encoded = self.image_encoder(x) #line = self.relu(self.l(encoded)) #mu = line[:,:300] #logvar = line[:,300:] return mu, logvar def reparameterize(self, mu, logvar): #if self.training: if True: std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return mu def decode(self, z): # h3 = self.relu(self.fc3(z)) # return self.sigmoid(self.fc4(h3)) _, fake_img, _, _ = self.netG(z, None) img = self.sigmoid(fake_img) return img def forward(self, x): mu, logvar = self.encode(x)#.view(-1, imsize)) z = self.reparameterize(mu, logvar) #z_p = self.decode_lin(z).view(-1, self.nef, 4, 4) #img = self.decode_img(z_p) #z = self.encode_img(x).view(-1, 480 * 4 * 4) img = self.decode(z) # mu = None # logvar = None return img, mu, logvar model = VAE() netD = STAGE1_D() if args.cuda: model.cuda() netD.cuda() model_params = filter(lambda p: p.requires_grad, model.parameters()) optimizer = optim.Adam(model_params, lr=1e-3) optd = optim.Adam(netD.parameters(), lr=1e-4) # Reconstruction + KL divergence losses summed over all elements and batch def loss_function(recon_x, x, mu, logvar): #pdb.set_trace() #BCE = 
F.binary_cross_entropy(recon_x, x, size_average=False) BCE = F.smooth_l1_loss(recon_x, F.sigmoid(x), size_average=False) # see Appendix B from VAE paper: # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 # https://arxiv.org/abs/1312.6114 # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) #KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.sum(KLD_element).mul_(-0.5) return BCE + KLD, BCE, KLD #return BCE, BCE, KLD def train(epoch): #criterion = nn.BCEWithLogitsLoss() criterion = nn.BCELoss() model.train() train_loss = 0 for batch_idx, data in enumerate(train_loader): _, real_img_cpu, _, _, _ = data data = real_img_cpu data = Variable(data) if args.cuda: data = data.cuda() optimizer.zero_grad() recon_batch, mu, logvar = model(data) loss, bce, kld = loss_function(recon_batch, data, mu, logvar) # #real_feats_g = netD(data) # #real_logits_g = netD.get_uncond_logits(real_feats_g) # fake_feats_g = netD(recon_batch) # fake_logits_g = netD.get_uncond_logits(fake_feats_g) # real_labels_g = Variable(torch.FloatTensor(args.batch_size).fill_(0.9)).cuda() # #fake_labels_g = Variable(torch.FloatTensor(args.batch_size).fill_(0)).cuda() # loss_fake_g = criterion(F.sigmoid(fake_logits_g), real_labels_g) # #loss_real_g = criterion(F.sigmoid(real_logits_g), fake_labels_g) # loss_g = loss_fake_g # loss = loss + loss_g loss.backward() train_loss += loss.data[0] optimizer.step() # optd.zero_grad() # real_feats = netD(data.detach()) # real_logits = netD.get_uncond_logits(real_feats) # fake_feats = netD(recon_batch.detach()) # fake_logits = netD.get_uncond_logits(fake_feats) # real_labels = Variable(torch.FloatTensor(args.batch_size).fill_(0.9)).cuda() # fake_labels = Variable(torch.FloatTensor(args.batch_size).fill_(0.1)).cuda() # loss_fake = criterion(F.sigmoid(fake_logits), fake_labels) # loss_real = criterion(F.sigmoid(real_logits), real_labels) # d_loss = loss_fake + loss_real 
# d_loss.backward() # optd.step() if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f} {:.4f} {:.4f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx
X, y=None, params={}, n_splits=0): # checkpoint_predictions = [] # weights = [] if mode == "train": self.run_model_train(model, X, y, params, n_splits > 1, n_splits) elif mode == "predict": pred = model.predict(X, verbose=2, batch_size=BATCH_SIZE) return pred # if predict_ones_with_identity: # return model.predict(self.train_X_identity, verbose=2, batch_size=BATCH_SIZE) def locate_data_in_np_train(self, index): """ :param index: must be for the index of range 405130 :return: """ return self.train_X[index], self.train_y[index], self.train_y_aux[index] def locate_subgroup_index_in_np_train(self, subgroup): df = self.id_validate_df index = df[df[subgroup]].index return index def locate_subgroup_data_in_np_train(self, subgroup): """ :param index: must be for the index of range 405130 :return: """ index = self.locate_subgroup_index_in_np_train(subgroup) return self.locate_data_in_np_train(index) def to_identity_index(self, index): """ :param index: from 1.8m range index :return: to 0.4 m range index in identity data """ df = self.id_validate_df # selected the items return [df.index.get_loc(label) for label in index] def _get_identities(self): """ No need to use this function, all identities are marked :return: """ prefix = self.emb.BIN_FOLDER # if os.path.isfile(prefix+'train_df.pd'): if False: self.train_df = pickle.load(open(prefix + "train_df.pd", "rb")) else: for g in d.IDENTITY_COLUMNS: pred = pickle.load(open(f"{prefix}_{g}_pred.pkl", "rb")) self.train_df[f"{g}_pred"] = pred for g in d.IDENTITY_COLUMNS: self.train_df.loc[self.identity_idx, f"{g}_pred"] = self.train_df.loc[ self.identity_idx, g ] def get_identities_for_training(self): if not self.id_used_in_train: # if not FINAL_SUBMIT: # in test set, around 10% data will be with identities (lower than training set) logger.debug("Use 90% identity data") id_df = self.train_df.loc[self.identity_idx] # 40,000 remained for val id_train_df = id_df.sample(frac=0.9, random_state=2019) id_train_df_idx = id_train_df.index # 
else: # we still need these ... for the early stop thing!!! # logger.debug("Use 100% identity data") # in test set, around 10% data will be with identities (lower than training set) # id_train_df_idx = self.identity_idx self.train_mask[id_train_df_idx] = 1 self.id_used_in_train = True for g in d.IDENTITY_COLUMNS: # column to keep recored what data is used in training, used in data_prepare module... self.train_df[g + "_in_train"] = 0.0 # only the ones larger than 0.5 will ? how about negative example? self.train_df[g + "_in_train"].loc[id_train_df_idx] = self.train_df[ g ].loc[id_train_df_idx] def prepare_weight_for_subgroup_balance(self): """ to see how other people handle weights [this kernel](https://www.kaggle.com/thousandvoices/simple-lstm) sample_weights = np.ones(len(x_train), dtype=np.float32) # more weights for the ones with identities, more identities, more weights sample_weights += train_df[IDENTITY_COLUMNS].sum(axis=1) # the toxic ones, reverse identity (without identity)(average 4~8), so more weights on toxic one without identity sample_weights += train_df[TARGET_COLUMN] * (~train_df[IDENTITY_COLUMNS]).sum(axis=1) # none toxic, non-toxic, with identity, more weight for this, so the weights are more or less balanced sample_weights += (~train_df[TARGET_COLUMN]) * train_df[IDENTITY_COLUMNS].sum(axis=1) * 5 sample_weights /= sample_weights.mean() And we know the identies now, so we balance all the ones, for every subgroup, we calculate the related weight to balance """ self.train_df = self.emb.train_df # self._get_identities() # for known ones, just skip analyzer = d.TargetDistAnalyzer(self.train_df) self.target_analyzer = analyzer o = analyzer.get_distribution_overall() self.get_identities_for_training() # for subgroup, use 0.5 as the limit, continuous info not used... 
anyway, we try first gs = analyzer.get_distribution_subgroups() # or 1->0.8 slop or 0.8->1, or y=-(x-1/2)^2+1/4; just test balance_scheme_subgroups = BALANCE_SCHEME_SUBGROUPS # or 1->0.8 slop or 0.8->1, or y=-(x-1/2)^2+1/4; just test balance_scheme_across_subgroups = BALANCE_SCHEME_ACROSS_SUBGROUPS # balance_scheme_target_splits = 'target_bucket_same_for_target_splits' # or 1->0.8 slop or 0.8->1, or y=-(x-1/2)^2+1/4; just test # not work, because manual change will corrupt orignial information? balance_scheme_target_splits = BALANCE_SCHEME_TARGET_SPLITS balance_AUC = BALANCE_SCHEME_AUC # need a parameter for all pos v.s. neg., and for different def add_weight(balance_scheme_subgroups, balance_group=False): # target value, how do we balance? # (First we equalize them, then re-balance), just try different balance ones_weights = np.ones(len(self.train_df), dtype=np.float32) # sample_weights = ones_weights.copy() gs_weights_ratio = {} gs_weights = {} background_target_ratios = np.array([dstr[2] for dstr in o]) if balance_scheme_subgroups == "target_bucket_same_for_subgroups": # compare with the background one, then change the weights to the same scale for g, v in gs.items(): gs_weights[g] = ones_weights.copy() # initial, ones # v is the distribution for ONE subgroup for 0~1 11 target types gs_weights_ratio[g] = np.divide( background_target_ratios, np.array( [dstr[2] for dstr in v]) ) for target_split_idx, ratio in enumerate(gs_weights_ratio[g]): # [3] is the index split_idx_in_df = v[target_split_idx][3] gs_weights[g][split_idx_in_df] *= ratio # or 1->0.8 slop or 0.8->1, or y=-(x-1/2)^2+1/4; just test if balance_scheme_across_subgroups == "more_for_low_score": subgroup_weights = {} subgroup_weights["homosexual_gay_or_lesbian"] = 4 subgroup_weights["black"] = 3 subgroup_weights["white"] = 3 subgroup_weights["muslim"] = 2.5 subgroup_weights["jewish"] = 4 """ 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 
'psychiatric_or_mental_illness' """ for g in subgroup_weights.keys(): subgroup_dist = gs[g] for dstr in subgroup_dist: split_idx_in_df = dstr[3] # the ones with identities will be added gs_weights[g][split_idx_in_df] *= subgroup_weights[g] # shape will be [sample nubmers , subgroups] as some sample might be in two groups weights_changer = np.transpose([v for v in gs_weights.values()]) weights_changer_max = np.amax(weights_changer, axis=1) weights_changer_min = np.amin(weights_changer, axis=1) weights_changer_mean = np.mean(weights_changer, axis=1) weights_changer_merged = ones_weights.copy() weights_changer_merged[weights_changer_mean > 1] = weights_changer_max[ weights_changer_mean > 1 ] weights_changer_merged[weights_changer_mean < 1] = weights_changer_min[ weights_changer_mean < 1 ] sample_weights = weights_changer_merged if balance_AUC == "more_bp_sn": # self.train_df, contains all info benchmark_base = ( self.train_df[d.IDENTITY_COLUMNS + [d.TARGET_COLUMN, d.TEXT_COLUMN]] .fillna(0) .astype(np.bool) ) # the idx happen to be the iloc value judge = d.BiasBenchmark(benchmark_base, threshold=0.5) # converted to binary in judge initailization function id_validate_df = judge.validate_df toxic_bool_col = id_validate_df[d.TARGET_COLUMN] contain_identity_bool_col = id_validate_df[d.IDENTITY_COLUMNS].any( axis=1 ) weights_auc_balancer = ones_weights.copy() / 4 # for subgroup postitive, will be 0.5 weight weights_auc_balancer[contain_identity_bool_col] += 1 / 4 # BPSN, BP part (0.5 weights) weights_auc_balancer[toxic_bool_col & ~contain_identity_bool_col] += ( 1 / 4 ) # still BPSN, SN part (0.75 weights) weights_auc_balancer[~toxic_bool_col & contain_identity_bool_col] += ( 1 / 4 ) sample_weights = np.multiply( sample_weights, weights_auc_balancer) wanted_split_ratios = None if balance_scheme_target_splits == "target_bucket_same_for_target_splits": wanted_split_ratios = [1 / len(background_target_ratios)] * len( background_target_ratios ) elif 
balance_scheme_target_splits == "target_bucket_extreme_positive": # 0 0.1 0.2 0.3 ... 1 # good wanted_split_ratios = [2, 2, 2, 2, 2, 10, 15, 20, 20, 15, 10] if wanted_split_ratios is not None: assert len(wanted_split_ratios) == len( background_target_ratios) for target_split_idx, ratio in enumerate(background_target_ratios): idx_for_split = o[target_split_idx][3] # 1/len(b_t_r) is what we want sample_weights[idx_for_split] *= ( wanted_split_ratios[target_split_idx] / ratio ) sample_weights /= sample_weights.mean() # normalize return sample_weights weights = add_weight(balance_scheme_subgroups) return weights def prepare_train_labels( self, train_y_all, train_mask, custom_weights=False, with_aux=False, train_y_aux=None, sample_weights=None, fortify_subgroup=None, ): val_mask = np.invert(kernel.train_mask) # this is whole train_mask if fortify_subgroup is not None: # only use the subgroup data train_mask = train_mask & ( self.train_df[fortify_subgroup + "_in_train"] >= 0.5 ) train_X = kernel.train_X_all[train_mask] val_X = kernel.train_X_all[val_mask] if not custom_weights: if with_aux: return ( train_X, val_X, train_y_all[train_mask], train_y_aux[train_mask], train_y_all[val_mask], train_y_aux[val_mask], ) else: return train_X, val_X, train_y_all[train_mask], train_y_all[val_mask] else: # credit to https://www.kaggle.com/tanreinama/simple-lstm-using-identity-parameters-solution if sample_weights is None: raise RuntimeError( "sample weights cannot be None if use custom_weights" ) assert len(train_y_all) == len(sample_weights) if with_aux: return ( train_X, val_X, np.vstack([train_y_all, sample_weights]).T[train_mask], train_y_aux[train_mask], train_y_all[val_mask], train_y_aux[val_mask], ) else: return ( train_X, val_X, np.vstack([train_y_all, sample_weights]).T[train_mask], train_y_all[val_mask], ) def res_combine_pred_print_result( self, subgroup, y_pred, y_res_pred, idx_train, idx_val, detail=False ): id_df = copy.deepcopy(self.judge.validate_df) assert 
len(idx_train) + len(idx_val) == len(id_df[id_df[subgroup]]) assert id_df.shape[0] == len(y_pred) model_name = "res_" + subgroup # there are comments mention two identity, so our way might not be good id_df[model_name] = y_pred # not iloc, both are index from the 1.8 Million data id_df.loc[idx_val, id_df.columns.get_loc(model_name)] = y_res_pred logger.debug( f"Res update for {subgroup}, {len(idx_val)} items predicted by res model" ) self.calculate_metrics_and_print( validate_df_with_preds=id_df, model_name=model_name, detail=detail, file_for_print="metrics_log.txt", ) def run_bias_auc_model(self): """ need to prepare data, then train network to handle the bias thing we use data (identity(given value), comment text) as feature, to recalculate target, and reduce bias after build right model, then use predicted features to do the same prediction :return: """ pass def load_identity_data_idx(self): if self.identity_idx is None: # to train the identity ( self.train_X_identity, self.train_y_identity, self.identity_idx, ) = self.emb.get_identity_train_data_df_idx() def calculate_metrics_and_print( self, filename_for_print="metrics_log.txt", preds=None, threshold=0.5, validate_df_with_preds=None, model_name="lstm", detail=True, benchmark_base=None, ): file_for_print = open(filename_for_print, "w") self.emb.read_train_test_df(train_only=True) self.load_identity_data_idx() if benchmark_base is None: benchmark_base = self.train_df.loc[self.identity_idx] # if self.judge is None: # no .... different threshold need to recalculate in the new judge # the idx happen to be the iloc value self.judge = d.BiasBenchmark(benchmark_base, threshold=threshold) self.id_validate_df = self.judge.validate_df if model_name == d.MODEL_NAME: if preds is not None: logger.debug(f"{model_name} result for {len(preds)} items:") if validate_df_with_preds is not None:
the image with.") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.") c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.") c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m") # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.") c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. 
If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. 
Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks") with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. 
Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). 
Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. 
Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', deprecate_info=c.deprecate(expiration='2.1.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. 
Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p']) c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help="image sku's version") c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show all vm size supporting availability zones") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help="show all information including vm sizes not available under the current subscription") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart
# -*- coding: utf-8 -*- """ Created on Thu Aug 22 15:58:31 2019 @author: DaniJ """ import numpy as np import scipy as sp from bvp import solve_bvp from scipy import linalg #from four_layer_model_2try_withFixSpeciesOption_Scaling import four_layer_model_2try_withFixSpeciesOption_Scaling as flm from matplotlib import pyplot as plt ''' In this first try we will assume that the vector of unknowns is composed in the following order: ''' def PB_and_fourlayermodel (T, X_guess, A, Z, log_k, idx_Aq, pos_psi_S1_vec, pos_psi_S2_vec, temp, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, d0,df, idx_fix_species = None, zel=1, tolerance_NR = 1e-6, max_iterations = 100,scalingRC = True, tol_PB=1e-6): counter_iterations = 0 abs_err = tolerance_NR + 1 if idx_fix_species != None: X_guess [idx_fix_species] = T [idx_fix_species] tempv = (np.linspace(d0,df,100)); y_0 = np.zeros((2,tempv.shape[0])) bvp_class = [y_0] while abs_err>tolerance_NR and counter_iterations < max_iterations: # Calculate Y [Y, T, bvp_class] = func_NR_FLM (X_guess, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Z, zel, pos_psi_S1_vec, pos_psi_S2_vec, d0,df, bvp_class, idx_fix_species,tol_PB) # Calculate Z J = Jacobian_NR_FLM (X_guess, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Z, zel, pos_psi_S1_vec, pos_psi_S2_vec, d0,df, bvp_class, idx_fix_species,tol_PB) # Scaling technique is the RC technique from "Thermodynamic Equilibrium Solutions Through a Modified Newton Raphson Method"-<NAME>, <NAME>, <NAME>, and <NAME> (2016) if scalingRC == True: D1 = diagonal_row(J) D2 = diagonal_col(J) J_new = np.matmul(D1,np.matmul(J, D2)) Y_new = np.matmul(D1, Y) delta_X_new = linalg.solve(J_new,-Y_new) delta_X = np.matmul(D2, delta_X_new) else: # Calculating the diff, Delta_X delta_X = linalg.solve(J,-Y) #print(delta_X)) # Relaxation factor borrow from <NAME> to avoid negative values max_1 = 1 max_2 =np.amax(-2*np.multiply(delta_X, 1/X_guess)) Max_f = 
np.amax([max_1, max_2]) Del_mul = 1/Max_f X_guess=X_guess + Del_mul*delta_X #print(X_guess) Xmod=X_guess.copy() for i in range(len(X_guess)): if X_guess[i]<=0: Xmod[i]=1 log_C = log_k + np.matmul(A,np.log10(Xmod)) # transf C = 10**(log_C) u = np.matmul(A.transpose(),C) #print(C) # Vector_error d = u-T print(d) if idx_fix_species != None: d[idx_fix_species] =0 abs_err = max(abs(d)) counter_iterations += 1 if counter_iterations >= max_iterations: raise ValueError('Max number of iterations surpassed.') #return X_guess, C # Speciation - mass action law Xmod=X_guess.copy() for i in range(len(X_guess)): if X_guess[i]<=0: Xmod[i]=1 log_C = log_k + np.matmul(A,np.log10(Xmod)) # transf C = 10**(log_C) return X_guess, C def func_NR_FLM (X, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Z, zel, pos_psi_S1_vec, pos_psi_S2_vec, d0,df, bvp_solver, idx_fix_species=None, tol_PB=1e-6): """ This function is supossed to be linked to the four_layer_two_surface_speciation function. It just gave the evaluated vector of Y, and T for the Newton-raphson procedure. The formulation of Westall (1980) is followed. FLM = four layer model """ # Speciation - mass action law Xmod=X.copy() for i in range(len(X)): if X[i]<=0: Xmod[i]=1 log_C = log_k + np.matmul(A,np.log10(Xmod)) # transf C = 10**(log_C) # Update T - "Electrostatic parameters" 'Notice that the last term of the psi_S1/2_v is not transformed from the boltzmann factor to the electrostatic potential!!!!!!!!!!!!' 
psi_S1_v = [Boltzman_factor_2_psi(X[pos_psi_S1_vec[0]], temp), Boltzman_factor_2_psi(X[pos_psi_S1_vec[1]], temp), Boltzman_factor_2_psi(X[pos_psi_S1_vec[2]], temp), X[pos_psi_S1_vec[3]]] psi_S2_v = [Boltzman_factor_2_psi(X[pos_psi_S2_vec[0]], temp), Boltzman_factor_2_psi(X[pos_psi_S2_vec[1]], temp), Boltzman_factor_2_psi(X[pos_psi_S2_vec[2]], temp), X[pos_psi_S2_vec[3]]] C_aq = C[idx_Aq] # [T, y0] = Update_T_FLM(T, sS1, sS2, e, temp, aS1, aS2, Z,CapacitancesS1, CapacitancesS2, psi_S1_v, psi_S2_v, zel, pos_psi_S1_vec, pos_psi_S2_vec, C_aq, d0,df, bvp_solver, tol_PB) # Calculation of Y Y= np.matmul(A.transpose(),C)-T return Y, T, y0 def Update_T_FLM(T, sS1, sS2, e, temp, aS1, aS2, Z,CapacitancesS1, CapacitancesS2, psi_S1_v, psi_S2_v, zel, pos_psi_S1_vec, pos_psi_S2_vec, C_aq, d0,df, bvp_solver, tol_PB): # constant F = 96485.3328959 # C/mol R = 8.314472 # J/(K*mol) eo = 8.854187871e-12 # Farrads = F/m - permittivity in vaccuum #e = 1.602176620898e-19 # C kb = 1.38064852e-23 # J/K other units --> kb=8,6173303e-5 eV/K Na = 6.022140857e23 # 1/mol elec_charge = 1.60217662e-19 #electron charge in C ########## S1 ##################### sigma_S1_0 = CapacitancesS1[0]*(psi_S1_v[0]-psi_S1_v[1]) sigma_S1_alpha = -sigma_S1_0 + CapacitancesS1[1]*(psi_S1_v[1]-psi_S1_v[2]) sigma_S1_beta = -sigma_S1_0-sigma_S1_alpha+CapacitancesS1[2]*(psi_S1_v[2]-psi_S1_v[3]) sigma_S1_gamma = -sigma_S1_0 - sigma_S1_alpha - sigma_S1_beta ########## S2 ##################### sigma_S2_0 = CapacitancesS2[0]*(psi_S2_v[0]-psi_S2_v[1]) sigma_S2_alpha = -sigma_S2_0 + CapacitancesS2[1]*(psi_S2_v[1]-psi_S2_v[2]) sigma_S2_beta = -sigma_S2_0-sigma_S2_alpha+CapacitancesS2[2]*(psi_S2_v[2]-psi_S2_v[3]) sigma_S2_gamma = -sigma_S2_0 - sigma_S2_alpha - sigma_S2_beta ########## T S1 ##################### T_S1_0 = ((sS1*aS1)/F)*sigma_S1_0; # units mol/L or mol/kg T_S1_alpha = ((sS1*aS1)/F)*sigma_S1_alpha; # units mol/L or mol/kg T_S1_beta = ((sS1*aS1)/F)*sigma_S1_beta; # units mol/L or mol/kg ########## T S2 
##################### T_S2_0 = ((sS2*aS2)/F)*sigma_S2_0; # units mol/L or mol/kg T_S2_alpha = ((sS2*aS2)/F)*sigma_S2_alpha; # units mol/L or mol/kg T_S2_beta = ((sS2*aS2)/F)*sigma_S2_beta; # units mol/L or mol/kg ################## PB part starts heres ######################################################################## ew = eo*e Q = Z*elec_charge # Q is the charve of the aqueous elements times the electron charge C = C_aq A =Na* 1000/ew # a prefactor = Avogadro * 1000 /ew #A = Na/ew kbt = 1.38064852e-23 *temp # kb (J/K) * T in K #y0 = np.zeros((2, x.size)) if type(bvp_solver) == list: y_0 = bvp_solver[0].copy() x = np.linspace(d0,df,y_0.shape[1]) #deltaing this #y1= bvp_solver[0].copy() #y2= bvp_solver[0].copy() #y3= bvp_solver[0].copy() else : y_0 = bvp_solver.y x = bvp_solver.x 'I think that y0[1,0] and y0[1,-1] are not necessary to solve the problem, I would say that its values do not have implications. Although I am not 100% sure.' y_0[1,0] = sigma_S1_gamma/ew # The negative value that I am given here is extremely arbitrary I am not sure why. 
IT MUST BE DISCUSSED # dpsi_d = -(sig_0 + sig_b + sig_d)/ew # electric field at diffuse layer, x>d y_0[1,-1] = -sigma_S2_gamma/ew y_0[0,0] = psi_S1_v[3] y_0[0,-1]= psi_S2_v[3] #y0[1,-1]= sigma_S2_gamma/ew args=[Q,C,A,kbt,y_0] result = solve_bvp(fun_PB, bc_PB, x, y_0, args = args, tol=tol_PB) # Looking further #y1[0,0] = psi_S1_v[0] #y1[0,-1] = psi_S2_v[0] #y1[1,0] = sigma_S1_0/ew #y1[1,-1] = -sigma_S2_0/ew #args=[Q,C,A,kbt,y1] #result1 = solve_bvp(fun_PB, bc_PB, x, y1, args = args, tol=tol_PB) #y2[0,0] = psi_S1_v[1] #y2[0,-1] = psi_S2_v[1] #y2[1,0] = sigma_S1_alpha/ew #y2[1,-1] = -sigma_S2_alpha/ew #args=[Q,C,A,kbt,y2] #result2 = solve_bvp(fun_PB, bc_PB, x, y2, args = args, tol=tol_PB) #y3[0,0] = psi_S1_v[2] #y3[0,-1] = psi_S2_v[2] #y3[1,0] = sigma_S1_beta/ew #y3[1,-1] = -sigma_S2_beta/ew #args=[Q,C,A,kbt,y3] #result3 = solve_bvp(fun_PB, bc_PB, x, y3, args = args, tol=tol_PB) #si1=-result1.y[1][0]*ew #si2=-result2.y[1][0]*ew #si3=-result3.y[1][0]*ew plt.figure(3) plt.plot(result.x, result.y[0]) # #assert 5==3 sigma_S1_d=-result.y[1][0]*ew sigma_S2_d=result.y[1][-1]*ew #sigma_S1_d=-result.y[1][0]*ew + CapacitancesS1[2]*(psi_S1_v[2]-psi_S1_v[3]) #sigma_S2_d=result.y[1][-1]*ew + CapacitancesS2[2]*(psi_S2_v[2]-psi_S2_v[3]) # T_S1_gammad = sigma_S1_gamma+sigma_S1_d T_S2_gammad = sigma_S2_gamma+sigma_S2_d # #print([sigma_S1_gamma, sigma_S2_gamma, sigma_S1_d, sigma_S2_d]) #print([psi_S1_v[3], psi_S2_v[3]]) # Now the values must be put in T T[pos_psi_S1_vec[0]] = T_S1_0 T[pos_psi_S1_vec[1]] = T_S1_alpha T[pos_psi_S1_vec[2]] = T_S1_beta T[pos_psi_S1_vec[3]] = T_S1_gammad T[pos_psi_S2_vec[0]] = T_S2_0 T[pos_psi_S2_vec[1]] = T_S2_alpha T[pos_psi_S2_vec[2]] = T_S2_beta T[pos_psi_S2_vec[3]] = T_S2_gammad return T, result def Boltzman_factor_2_psi (x,temp): ''' Transforms the equation from Xb = exp(-psi*F/RT) to psi = -ln(Xb)RT/F from Boltzman factor to electrostatic potential The units of "temp" (short for temperature) should be Kelvin ''' R = 8.314472 # J/(K*mol) F = 
96485.3328959 # C/mol D = R*temp psi = - np.log(x)*(D/F) return psi def Jacobian_NR_FLM (X, A, log_k, temp, idx_Aq, sS1, aS1, sS2, aS2, e, CapacitancesS1, CapacitancesS2, T, Zi, zel, pos_psi_S1_vec, pos_psi_S2_vec, d0,df, bvp_class, idx_fix_species=None,tol_PB=1e-6): ''' This function should give the Jacobian. Here The jacobian is calculated as Westall (1980), except the electrostatic terms that are slightly different. The reason is because there seems to be some typos in Westall paper. Also, if idx_fix_species is given then the rows of the unknown will be 1 for the unknown and 0 for the other points. ''' # constant F = 96485.3328959 # C/mol [Faraday constant] R = 8.314472 # J/(K*mol) [universal constant gas] eo = 8.854187871e-12 # Farrads = F/m - permittivity in vaccuum elec_charge = 1.60217662e-19 #electron charge in C # Speciation - mass action law #log_C = log_k + A*np.log10(X) Xmod=X.copy() for i in range(len(X)): if X[i]<=0: Xmod[i]=1 log_C = log_k + np.matmul(A,np.log10(Xmod)) # transf C = 10**(log_C) C_aq = C[idx_Aq] #I = Calculate_ionic_strength(Z, C_aq) # instantiate Jacobian length_X = X.size Z = np.zeros((length_X,length_X)) # First part is the common of the Jacbian derivation for i in range(0, length_X): for j in range(0, length_X): Z[i,j]= np.matmul(np.multiply(A[:,i], A[:,j]), (C/X[j])) # Now the electrostatic part must be modified, one question hang on the air: # Should we check that the electrostatic part is as we expected? ############S1####################### sa_F2S1 = (sS1*aS1)/(F*F) C1_sa_F2_RTS1 = sa_F2S1*CapacitancesS1[0]*R*temp # Assigning in Jacobian (plane 0) Z[pos_psi_S1_vec[0],pos_psi_S1_vec[0]]=Z[pos_psi_S1_vec[0],pos_psi_S1_vec[0]] + C1_sa_F2_RTS1/X[pos_psi_S1_vec[0]] Z[pos_psi_S1_vec[0],pos_psi_S1_vec[1]]=Z[pos_psi_S1_vec[0],pos_psi_S1_vec[1]] - C1_sa_F2_RTS1/X[pos_psi_S1_vec[1]] #### plane alpha C1C2_sa_F2_RTS1 = sa_F2S1*R*temp*(CapacitancesS1[0]+CapacitancesS1[1]) C2_sa_F2_RTS1
<reponame>samaloney/STIXCore import os import sys import sqlite3 from types import SimpleNamespace import numpy as np from scipy import interpolate from stixcore.util.logging import get_logger __all__ = ['IDB', 'IDBPacketTypeInfo', 'IDBParameter', 'IDBStaticParameter', 'IDBVariableParameter', 'IDBPacketTree', 'IDBPi1ValPosition', 'IDBPolynomialCalibration', 'IDBCalibrationCurve', 'IDBCalibrationParameter'] logger = get_logger(__name__) class IDBPi1ValPosition(SimpleNamespace): """A class to represent parsing information for optional PI1_Val identifier. Attributes ---------- PIC_PI1_OFF : `int` PIC_PI1_OFF PIC_PI1_WID : `int` PIC_PI1_WID """ def __init__(self, *, PIC_PI1_OFF, PIC_PI1_WID): super().__init__(PIC_PI1_OFF=PIC_PI1_OFF, PIC_PI1_WID=PIC_PI1_WID) @property def offset(self): """Get number of bits as start position for the PI1_Val parameter started after header. Derived from PIC_PI1_OFF Returns ------- `int` Unsigned integer number of bits """ return (int(self.PIC_PI1_OFF) - 16) * 8 @property def width(self): """Get number of bits to read for the PI1_Val parameter. Derived from PIC_PI1_WID Returns ------- `int` bits """ return self.PIC_PI1_WID class IDBPacketTypeInfo(SimpleNamespace): """A class to represent descriptive information for a idb packet type. Attributes ---------- PID_SPID : `int` SCOS-2000 Telemetry Packet Number. Unsigned integer number in the range (1....2^32-1) (note that zero is not allowed). PID_DESCR : `str` Textual description of the SCOS-2000 telemetry packet (max 64 characters). PID_TPSD : `int`: SCOS-2000 Telemetry Packet Structure Definition. This field is only used by the Variable Packets Display application. It has to be set to `-1` for packets which are not defined in the VPD table and thus are not required to be processed by the Variable PacketsDisplay. If not set to –1, unsigned integer number in the range (1....2^31-1) (note that zero is not allowed). 
""" def __init__(self, *, PID_SPID, PID_DESCR, PID_TPSD): super(IDBPacketTypeInfo, self).__init__(PID_SPID=PID_SPID, PID_DESCR=PID_DESCR, PID_TPSD=PID_TPSD) def is_variable(self): """Is the telemetry packet of variable length. Returns ------- `bool` True if the TM packet has a variable size """ return self.PID_TPSD != -1 class IDBPolynomialCalibration: """A class to represent a 4th order polynomial calibration defined in the IDB.""" def __init__(self, rows): """Construct all the necessary attributes for the IDBPolynomialCalibration object. Parameters ---------- rows : `list` the polynomial parameters from the IDB """ try: self.orig = rows self.A = [float(row) for row in rows[0]] self.valid = True except (ValueError, IndexError): self.valid = False def __repr__(self): return f'{self.__class__.__name__}({self.orig})' def __call__(self, x): """Apply the polynomial function to the raw value. Parameters ---------- x : `number` the raw value Returns ------- `float` polynomial function value """ x = np.array(x) res = (self.A[0] * x ** 0 + self.A[1] * x ** 1 + self.A[2] * x ** 2 + self.A[3] * x ** 3 + self.A[4] * x ** 4) return res.tolist() if self.valid else None class IDBCalibrationCurve: """A class to represent a calibration curve for a LUT based interpolation defined in the IDB.""" def __init__(self, rows, param): """Construct all the necessary attributes for the IDBCalibrationCurve object. 
Parameters ---------- rows : `list` [x, y] all support points from the IDB param : `IDBCalibrationParameter` """ try: self.x = [float(row[0]) for row in rows] self.y = [float(row[1]) for row in rows] self.valid = True except ValueError: self.valid = False self.param = param self.orig = rows if len(self) <= 1: logger.error(f'Invalid curve calibration parameter {param.PCF_NAME} / \ {param.PCF_CURTX}: at least two data points needed') self.valid = False def __repr__(self): return f'{self.__class__.__name__}({self.orig})' def __len__(self): return len(self.x) def __call__(self, raw): """Apply the interpolation function with the raw value based on the LUT provided by the IDB. Parameters ---------- raw : `number` The raw value to apply to Returns ------- `float` interpolated value """ if not self.valid: return None if len(self) == 2: return ((self.y[1] - self.y[0]) / (self.x[1] - self.x[0]) * (raw - self.x[0]) + self.y[0]) try: tck = interpolate.splrep(self.x, self.y) val = interpolate.splev(raw, tck) return val except Exception as e: logger.error(f'Failed to curve calibrate {self.param.PCF_NAME} / \ {self.param.PCF_CURTX} due to {e}') class IDBParameter(IDBPacketTypeInfo): """A base class to represent a parameter of a SCOS-2000 Telemetry Packet. Attributes ---------- PID_SPID : `int` SCOS-2000 Telemetry Packet Number the parameter belongs to. Unsigned integer number in the range (1....2^32-1) (note that zero is not allowed). PID_DESCR : `str` Textual description of the SCOS-2000 telemetry packet the parameter belongs to max 64 charactars. PID_TPSD : `int` SCOS-2000 Telemetry Packet Structure Definition. This field is only used by the Variable Packets Display application. It has to be set to ‘-1’ for packets which are not defined in the VPD table and thus are not required to be processed by the Variable PacketsDisplay. If not set to –1, unsigned integer number in the range (1....2^31-1) (note that zero is not allowed). PCF_NAME : `str` Name of the parameter. 
Alphanumeric string uniquely identifying the monitoring parameter (max 8 characters). PCF_DESCR : `str` Parameter Description - free textual description of the parameter. PCF_WIDTH : `int` 'Padded' width of this parameter expressed in number of bits. This field is only used when extracting parameter samples using the VPD definition to identify the bitposition where the next telemetry parameter starts PCF_PFC : `int` Parameter Format Code. Along with the Parameter Type Code (PCF_PTC) this field controls the length of the parameter. Integer value in a range compatible with the specified PCF_PTC PCF_PTC : `int` Parameter Type Code. This controls the encoding format of the parameter. Integer value in the range (1..13). PCF_CURTX : `int` Parameter calibration identification name. Depending on parameter category, this field stores the numerical calibration or the textual calibration identification name. 2K_TYPE : `str` TBD. bin_format : `str` Read instruction format of the specific parameter for processing the bit stream e.g. "int:8". See `bitstream.ConstBitStream.read` for more information. """ def __init__(self, *, PID_SPID, PID_DESCR, PID_TPSD, PCF_NAME, PCF_DESCR, PCF_WIDTH, PCF_PFC, PCF_PTC, PCF_CURTX, S2K_TYPE, bin_format=''): super(IDBPacketTypeInfo, self).__init__(PID_SPID=PID_SPID, PID_DESCR=PID_DESCR, PID_TPSD=PID_TPSD) self.PCF_NAME = PCF_NAME self.PCF_DESCR = PCF_DESCR self.PCF_WIDTH = PCF_WIDTH self.PCF_PFC = PCF_PFC self.PCF_PTC = PCF_PTC self.PCF_CURTX = PCF_CURTX self.S2K_TYPE = S2K_TYPE self.bin_format = bin_format class IDBStaticParameter(IDBParameter): """A class to represent a parameter of a static SCOS-2000 Telemetry Packet. Attributes ---------- PLF_OFFBY : `int` Location of first occurrence of parameter value in octets, relative to the end of the SCOS-2000 TM header. Integer value starting from 0 (negative values are not allowed). PLF_OFFBI : `int` Bit number, within an octet, of the first bit of the first occurrence of the parameter value. 
Bit 0 corresponds to the most left bit withinthe byte. Integer value in the range (0..7). """ def __init__(self, *, PLF_OFFBY, PLF_OFFBI, **kwargs): super(IDBStaticParameter, self).__init__(**kwargs) self.PLF_OFFBY = PLF_OFFBY self.PLF_OFFBI = PLF_OFFBI @staticmethod def is_variable(): """Is the parameter for a variable telemetry packet. Returns ------- `bool` Always False for static parameters """ return False class IDBVariableParameter(IDBParameter): """A class to represent a parameter of a variable SCOS-2000 Telemetry Packet. Attributes ---------- VPD_POS : `int` Ordinal position of this parameter inside the packet definition in ascending order. VPD_OFFSET : `int` Number of bits between the start position of this parameter and the end bit of the previous parameter in the packet. A positive offset enables the introduction of a ‘gap’ between the previous parameter and this one. A negative offset enables the ‘overlap’ of the bits contributing to this parameter with the ones contributing to the previous parameter(s). Integer value in the range (-32768..32767). VPD_GRPSIZE : `int` This value should only be set for parameters which identify a repeat counter N """ def __init__(self, *, VPD_POS, VPD_OFFSET, VPD_GRPSIZE, **kwargs): super(IDBVariableParameter, self).__init__(**kwargs) self.VPD_POS = VPD_POS self.VPD_OFFSET = VPD_OFFSET self.VPD_GRPSIZE = VPD_GRPSIZE @staticmethod def is_variable(): """Is the parameter for a variable telemetry packet. Returns ------- `bool` Always True for this class """ return True class IDBCalibrationParameter(IDBParameter): """A class to represent a parameter for calibration. PCF_NAME': 'NIXD0167', 'PCF_CURTX': 'CAAT0033TM', 'PCF_CATEG': 'S', 'PCF_UNIT': None Attributes ---------- PCF_CATEG : `str` Calibration category of the parameter one of N|S|T|R|D|P|H|S|C. STIX only uses (N)umeric and (S)tring at the moment. PCF_UNIT : `str` Engineering unit mnemonic of the parameter values e.g. ‘VOLT’ (max length 4). 
""" def __init__(self, *, PCF_CATEG, PCF_UNIT, **kwargs): super(IDBCalibrationParameter, self).__init__(**kwargs) self.PCF_CATEG = PCF_CATEG self.PCF_UNIT = PCF_UNIT class IDBPacketTree: """Class representing a dynamic telemetry packet of variable length in a tree structure with nested repeaters.""" def __init__(self, *, children=None, counter=1, name='top',
in any attribute values for _, attr in self._auto_attribs: attr.set_value(etree, self) def toxml(self, etree=None, **options): """ If `etree` is specified, then this theory object will be serialized into that element tree Element; otherwise, a new Element will be created. """ #print 'serializing %s' % self.__class__.__theory_name__ indent = options.get('indent') if indent is not None: options['indent'] += ' ' if etree is None: etree = ET.Element(self.__class__.__theory_name__) else: assert etree.tag == self.__class__.__theory_name__, ( etree.tag, self.__class__.__theory_name__) for _, attr in self._auto_attribs: attr.serialize(etree, self, **options) # Indentation... if len(etree) > 0 and indent is not None: etree.text = '\n' + indent+' ' for child in etree[:-1]: child.tail = '\n' + indent+' ' etree[-1].tail = '\n' + indent if indent is not None: options['indent'] = indent etree.tail = '\n' return etree def pprint(self, depth=-1, hide=(), follow_pointers=False, indent=' ', memo=None): """ Return a pretty-printed string representation of this theory object. The first line identifies this theory object, and the subsequent lines describe its contents (including nested or referenced theory objects). @param depth: The maximum depth to which nested theory objects should be displayed. @param hide: A set of names of attributes that should not be displayed. (By default, the XML id and the EDT and byte offsets are not displayed by __str__). @param follow_pointers: If true, then attributes that contain pointers have their contents displayed just like nested elements. If false, then the pointer targets are not expanded. 
""" if memo is None: memo = set() if id(self) in memo: return '<%s...>' % self.__class__.__theory_name__ memo.add(id(self)) s = self._pprint_firstline(indent) for attr_name, attr_spec in self.__class__._auto_attribs: if attr_name in hide: continue val = getattr(self, attr_name) if attr_name == '_children': attr_name = '' elif attr_name.startswith('_'): continue attr_depth = depth if (not follow_pointers and val is not None and isinstance(attr_spec, _ReferenceAttribute) and not isinstance(val, DanglingPointer)): s += '\n%s%s = <%s...>' % ( indent, attr_name, getattr(val.__class__, '__theory_name__', val.__class__.__name__)) else: s += '\n'+self._pprint_value(attr_name, val, attr_depth, hide, follow_pointers, indent, memo) return s def _get_summary(self): return None def _pprint_firstline(self, indent): s = self.__class__.__theory_name__ + ':' text = self._get_summary() if text: maxlen = max(9, 65-len(indent)- len(self.__class__.__theory_name__)*2) s += ' %s' % _truncate(text, maxlen) return s def _pprint_value(self, attr, val, depth, hide, follow_pointers, indent, memo): s = indent if attr: s += attr + ' = ' if isinstance(val, Theory): if depth is not None and depth == 0: return s+'<%s...>' % getattr(val.__class__, '__theory_name__', val.__class__.__name__) return s+val.pprint(depth-1, hide, follow_pointers, indent+' ', memo) elif isinstance(val, list): if len(val) == 0: return s+'[]' if depth is not None and depth == 0: return s+'[...]' items = [self._pprint_value('', item, depth-1, hide, follow_pointers, indent+' ', memo) for item in val] if depth == 1 and len(items) > 12: items = items[:10] + ['%s ...and %d more...' 
% (indent, len(items)-10)] s += '[\n%s\n%s]' % ('\n'.join(items), indent) return s elif isinstance(val, basestring): text=repr(val) maxlen = max(9, 75-len(s)) if len(text) > maxlen: text = text[:maxlen-9]+'...'+text[-6:] return s+text else: return s+repr(val) _default_hidden_attrs = set(['id', 'start_byte', 'end_byte', 'start_edt', 'end_edt']) def __repr__(self): text = self._get_summary() if text: return '<%s %s>' % (self.__class__.__theory_name__, text) else: return '<%s>' % self.__class__.__theory_name__ def __str__(self): return self.pprint(depth=2, hide=self._default_hidden_attrs, follow_pointers=False) @property def owner(self): """The theory object that owns this Theory""" if self._owner is None: return None else: return self._owner() def owner_with_type(self, theory_class): """ Find and return the closest owning theory with the given class. If none is found, return None. E.g., use tok.owner(Sentence) to find the sentence containing a token. """ if isinstance(theory_class, basestring): theory_class = Theory._theory_classes[theory_class] theory = self while theory is not None and not isinstance(theory, theory_class): if theory._owner is None: return None theory = theory._owner() return theory @property def document(self): """The document that contains this Theory""" return self.owner_with_type(Document) def resolve_pointers(self, fail_on_dangling_pointer=True): """ Replace reference attributes with their actual values for this theory and any theory owned by this theory (directly or indirectly). Prior to calling this, every time you access a reference attribute, its value will be looked up in the document's identifier map. @param fail_on_dangling_pointer: If true, then raise an exception if we find a dangling pointer. 
""" for attr_name, attr_spec in self._auto_attribs: attr_val = getattr(self, attr_name) # Replace any reference attribute w/ its actual value (unless # it's a dangling pointer) if isinstance(attr_spec, _ReferenceAttribute): if attr_name not in self.__dict__: if not isinstance(attr_val, DanglingPointer): setattr(self, attr_name, attr_val) elif fail_on_dangling_pointer: raise ValueError('Dangling pointer: %r' % attr_val) # Recurse to any owned objects. elif isinstance(attr_val, Theory): attr_val.resolve_pointers(fail_on_dangling_pointer) @classmethod def _help_header(cls): return 'The %r class defines the following attributes:' % ( cls.__theory_name__) @classmethod def help(cls): props = [(k,v) for base in cls.mro() for (k,v) in base.__dict__.items() if isinstance(v, property)] s = cls._help_header()+'\n' w = max([8]+[len(n) for (n,_) in cls._auto_attribs]+ [len(n) for (n,_) in props])+2 for attr_name, attr_spec in cls._auto_attribs: if attr_name == '_children': continue help_line = textwrap.fill(attr_spec.help(), initial_indent=' '*(w+3), subsequent_indent=' '*(w+3)).strip() s += ' %s %s\n' % (attr_name.ljust(w, '.'), help_line) if props: s += ('The following derived properties are also ' 'available as attributes:\n') for (k,v) in props: help_text = v.__doc__ or '(undocumented)' help_text = help_text.replace( 'this Theory', 'this '+cls.__theory_name__) help_text = ' '.join(help_text.split()) help_line = textwrap.fill( help_text, initial_indent=' '*(w+3), subsequent_indent=' '*(w+3)).strip() s += ' %s %s\n' % (k.ljust(w, '.'), help_line) print s.rstrip() def _truncate(text, maxlen): if text is None: return None elif len(text) <= maxlen: return text else: return text[:maxlen-9]+'...'+text[-6:] class DocumentTheory(Theory): def __init__(self, etree=None, owner=None, **attribs): self._idmap = weakref.WeakValueDictionary() Theory.__init__(self, etree, owner, **attribs) _OWNER_IS_REQUIRED = False def _init_from_etree(self, etree, owner): # If the argument isn't an etree, 
then create one. if hasattr(etree, 'makeelement'): pass # ok. elif hasattr(etree, 'getroot'): etree = etree.getroot() # ElementTree object elif isinstance(etree, basestring): if re.match('^\s*<', etree): etree = ET.fromstring(etree) # xml string elif '\n' not in etree: etree = ET.parse(etree).getroot() # filename else: raise ValueError('Expected a filename, xml string, stream, ' 'or ElementTree. Got a %s' % etree.__class__.__name__) elif hasattr(etree, 'read'): etree = ET.fromstring(etree.read()) # file object else: raise ValueError('Expected a filename, xml string, stream, ' 'or ElementTree. Got a %s' % etree.__class__.__name__) # If we got a CAMEO_XML element, then take its document. if (etree.tag == 'CAMEO_XML' and len(etree) == 1 and etree[0].tag == 'Document'): etree = etree[0] Theory._init_from_etree(self, etree, owner) # Resolve pointers. self.resolve_pointers() def save(self, file_or_filename): cameoxml_etree = ET.Element('Document') cameoxml_etree.text = '\n ' etree=getattr(self, '_etree', None) cameoxml_etree.append(self.toxml(etree, indent=' ')) ET.ElementTree(cameoxml_etree).write(file_or_filename) def register_id(self, theory): if theory.id is not None: if theory.id in self._idmap: raise ValueError('Duplicate id %s' % theory.id) self._idmap[theory.id] = theory def lookup_id(self, theory_id): return self._idmap.get(theory_id) _default_hidden_attrs = set(['lexicon']) class SequenceTheory(Theory): _children = "This class attr must be defined by subclasses." 
def __len__(self): return len(self._children) def __iter__(self): return self._children.__iter__() def __contains__(self, item): return self._children.__contains__(item) def __getitem__(self, n): return self._children.__getitem__(n) def __repr__(self): return '<%s: %s>' % (self.__class__.__theory_name__, self._children) def resolve_pointers(self, fail_on_dangling_pointer=True): Theory.resolve_pointers(self, fail_on_dangling_pointer) for child in self._children: child.resolve_pointers(fail_on_dangling_pointer) @classmethod def _help_header(cls): child_class_name = cls._children._cls_name return textwrap.fill( 'The %r class acts as a sequence of %r elements. ' 'Additionally, it defines the following attributes:' % (cls.__theory_name__, child_class_name)) ###################################################################### #{ Theory Classes ###################################################################### class Document(DocumentTheory): sentences = _ChildTheoryElement('Sentences') events = _ChildTheoryElement('Events') class Sentences(SequenceTheory): _children = _ChildTheoryElementList('Sentence', index_attrib='sent_no') class Sentence(Theory): id = _SimpleAttribute(is_required=True) char_offsets = _SimpleAttribute(is_required=True) contents = _TextOfElement('Contents') class Events(SequenceTheory): _children = _ChildTheoryElementList('Event') class Event(Theory): id = _SimpleAttribute(is_required=True) participants = _ChildTheoryElementList('Participant') type = _SimpleAttribute(is_required=True) tense = _SimpleAttribute(is_required=True) sentence_id = _SimpleAttribute(is_required=True) class Participant(Theory): role = _SimpleAttribute(default='') actor_id = _SimpleAttribute(default='') actor_name = _SimpleAttribute(default='') agent_id = _SimpleAttribute(default='') agent_name = _SimpleAttribute(default='') ###################################################################### # ACCENT HTTP Server 
###################################################################### HOSTNAME = 'localhost' PORT = 9999 PROCESS_DOCUMENT_TEMPLATE = r''' <SerifXMLRequest> <ProcessDocument end_stage="%(end_stage)s" output_format="CAMEOXML" input_type="%(input_type)s" %(date_string)s> %(document)s </ProcessDocument> </SerifXMLRequest> ''' DOCUMENT_TEMPLATE = r''' <Document language="%(language)s" docid="%(docid)s"> <OriginalText><Contents>%(content)s</Contents></OriginalText> </Document> ''' def send_pd_request(document, hostname=HOSTNAME, port=PORT, end_stage='output', input_type='auto', verbose=False, document_date=None, timeout=0, num_tries=1): """ Send a XML request to process the given document to the specified server. If successful, then return a `Document` object containing the processed document. If unsuccessful, then raise an exception with the response message from the server. @param document: A string containing an XML <Document> element. @param hostname: The hostname of the HTTP server. @param port: The port on which the HTTP server is listening. @param end_stage: The
        # NOTE(review): this chunk opens mid-test (its ``def`` line is
        # outside the recovered source) and the final test is truncated.
        # Indentation is reconstructed from a whitespace-mangled dump.
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["missing_field"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         "Invalid combination of fields")

    def test_validate_registration_run_failure_bad_fields(self):
        """Both bad fields must be reported (order-insensitive check)."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["bad_fields"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(sorted(d.data["invalid_fields"]),
                         sorted(["msg_receiver", "last_period_date"]))

    def test_validate_registration_run_failure_bad_lmp(self):
        """An out-of-range last-period date fails validation."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["bad_lmp"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         ["last_period_date out of range"])

    def test_validate_registration_run_failure_receiver_id(self):
        """A friend registration whose receiver is the mother must fail."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_friend"].copy(),
            "source": self.make_source_adminuser()
        }
        # reg_data = registration_data.copy()
        registration_data["data"]["receiver_id"] = registration_data[
            "mother_id"]
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         "mother requires own id")

    def test_validate_registration_run_failure_mother_uuid(self):
        """mother_id below is one character short of a valid UUID."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         "Invalid UUID mother_id")

    def test_validate_registration_run_failure_mother_id(self):
        """For a mother registration, receiver_id must equal mother_id."""
        # Setup
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        # reg_data = registration_data.copy()
        registration_data["data"]["receiver_id"] = str(uuid.uuid4())
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.apply_async(args=[registration.id])
        # Check
        self.assertEqual(result.get(), "Validation completed - Failure")
        d = Registration.objects.get(id=registration.id)
        self.assertEqual(d.data["invalid_fields"],
                         "mother_id should be the same as receiver_id")


class TestSubscriptionRequest(AuthenticatedAPITestCase):
    """Tests for create_subscriptionrequests; every external HTTP call
    (messageset, schedule, identity, outbound) is mocked via responses."""

    @responses.activate
    def test_mother_only_prebirth_sms(self):
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.text.10_42'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 1,
                    "short_name": 'prebirth.mother.text.10_42',
                    "default_schedule": 1
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/1/',
            json={"id": 1, "day_of_week": "1,3,5"},
            status=200, content_type='application/json',
        )
        # mock household schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/3/',
            json={"id": 3, "day_of_week": "5"},
            status=200, content_type='application/json',
        )
        # mock mother MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/mother00-9d89-4aa6-99ff-13c225365b5d/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id": 1},
            status=200, content_type='application/json',
        )
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["preg_week"] = 15
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.create_subscriptionrequests(
            registration)
        # Check
        self.assertEqual(result, "1 SubscriptionRequest created")
        d_mom = SubscriptionRequest.objects.last()
        self.assertEqual(d_mom.identity,
                         "mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.messageset, 1)
        self.assertEqual(d_mom.next_sequence_number, 15)
        self.assertEqual(d_mom.lang, "eng_NG")
        self.assertEqual(d_mom.schedule, 1)

    @responses.activate
    def test_mother_only_prebirth_voice_tue_thu_9_11(self):
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.audio.10_42.tue_thu.9_11'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 2,
                    "short_name": 'prebirth.mother.audio.10_42.tue_thu.9_11',
                    "default_schedule": 6
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/6/',
            json={"id": 6, "day_of_week": "2,4"},
            status=200, content_type='application/json',
            match_querystring=True
        )
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_mother"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["preg_week"] = 15
        registration_data["data"]["msg_type"] = "audio"
        registration_data["data"]["voice_times"] = "9_11"
        registration_data["data"]["voice_days"] = "tue_thu"
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.create_subscriptionrequests(
            registration)
        # Check
        self.assertEqual(result, "1 SubscriptionRequest created")
        d_mom = SubscriptionRequest.objects.last()
        self.assertEqual(d_mom.identity,
                         "mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.messageset, 2)
        # audio sets start at sequence 10, not the pregnancy week
        self.assertEqual(d_mom.next_sequence_number, 10)
        self.assertEqual(d_mom.lang, "eng_NG")
        self.assertEqual(d_mom.schedule, 6)

    @responses.activate
    def test_friend_only_prebirth_sms(self):
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.text.10_42'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 1,
                    "short_name": 'prebirth.mother.text.10_42',
                    "default_schedule": 1
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock household messageset lookup
        query_string = '?short_name=prebirth.household.audio.10_42.fri.9_11'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 3,
                    "short_name": 'prebirth.household.audio.10_42.fri.9_11',
                    "default_schedule": 3
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/1/',
            json={"id": 1, "day_of_week": "1,3,5"},
            status=200, content_type='application/json',
        )
        # mock household schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/3/',
            json={"id": 3, "day_of_week": "5"},
            status=200, content_type='application/json',
        )
        # mock mother MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/mother00-9d89-4aa6-99ff-13c225365b5d/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock friend MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/friend00-73a2-4d89-b045-d52004c025fe/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id": 1},
            status=200, content_type='application/json',
        )
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_friend"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["preg_week"] = 15
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.create_subscriptionrequests(
            registration)
        # Check
        self.assertEqual(result, "2 SubscriptionRequests created")
        d_mom = SubscriptionRequest.objects.get(
            identity="mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.identity,
                         "mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.messageset, 1)
        self.assertEqual(d_mom.next_sequence_number, 15)
        self.assertEqual(d_mom.lang, "eng_NG")
        self.assertEqual(d_mom.schedule, 1)
        d_friend = SubscriptionRequest.objects.get(
            identity="friend00-73a2-4d89-b045-d52004c025fe")
        self.assertEqual(d_friend.identity,
                         "friend00-73a2-4d89-b045-d52004c025fe")
        self.assertEqual(d_friend.messageset, 3)
        self.assertEqual(d_friend.next_sequence_number, 5)
        self.assertEqual(d_friend.lang, "eng_NG")
        self.assertEqual(d_friend.schedule, 3)

    @responses.activate
    def test_friend_only_voice_mon_wed_2_5(self):
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.audio.10_42.mon_wed.2_5'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 2,
                    "short_name": 'prebirth.mother.audio.10_42.mon_wed.2_5',
                    "default_schedule": 5
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock household messageset lookup
        query_string = '?short_name=prebirth.household.audio.10_42.fri.9_11'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 3,
                    "short_name": 'prebirth.household.audio.10_42.fri.9_11',
                    "default_schedule": 3
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/5/',
            json={"id": 5, "day_of_week": "1,3"},
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock household schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/3/',
            json={"id": 3, "day_of_week": "5"},
            status=200, content_type='application/json',
        )
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_friend"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["preg_week"] = 15
        registration_data["data"]["msg_type"] = "audio"
        registration_data["data"]["voice_times"] = "2_5"
        registration_data["data"]["voice_days"] = "mon_wed"
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.create_subscriptionrequests(
            registration)
        # Check
        self.assertEqual(result, "2 SubscriptionRequests created")
        d_mom = SubscriptionRequest.objects.get(
            identity="mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.identity,
                         "mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.messageset, 2)
        self.assertEqual(d_mom.next_sequence_number, 10)
        self.assertEqual(d_mom.lang, "eng_NG")
        self.assertEqual(d_mom.schedule, 5)
        # voice subscriptions get a welcome clip prepended
        self.assertEqual(d_mom.metadata["prepend_next_delivery"],
                         "http://registration.dev.example.org/static/audio/registration/eng_NG_welcome_mother.mp3")  # noqa
        d_friend = SubscriptionRequest.objects.get(
            identity="friend00-73a2-4d89-b045-d52004c025fe")
        self.assertEqual(d_friend.identity,
                         "friend00-73a2-4d89-b045-d52004c025fe")
        self.assertEqual(d_friend.messageset, 3)
        self.assertEqual(d_friend.next_sequence_number, 5)
        self.assertEqual(d_friend.lang, "eng_NG")
        self.assertEqual(d_friend.schedule, 3)
        self.assertEqual(d_friend.metadata["prepend_next_delivery"],
                         "http://registration.dev.example.org/static/audio/registration/eng_NG_welcome_household.mp3")  # noqa

    @responses.activate
    def test_family_only_prebirth_sms(self):
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.text.10_42'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 1,
                    "short_name": 'prebirth.mother.text.10_42',
                    "default_schedule": 1
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock household messageset lookup
        query_string = '?short_name=prebirth.household.audio.10_42.fri.9_11'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 3,
                    "short_name": 'prebirth.household.audio.10_42.fri.9_11',
                    "default_schedule": 3
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/1/',
            json={"id": 1, "day_of_week": "1,3,5"},
            status=200, content_type='application/json',
        )
        # mock household schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/3/',
            json={"id": 3, "day_of_week": "5"},
            status=200, content_type='application/json',
        )
        # mock mother MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/mother00-9d89-4aa6-99ff-13c225365b5d/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock family MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/family00-73a2-4d89-b045-d52004c025fe/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234124"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id": 1},
            status=200, content_type='application/json',
        )
        # prepare registration data
        registration_data = {
            "stage": "prebirth",
            "mother_id": "mother00-9d89-4aa6-99ff-13c225365b5d",
            "data": REG_DATA["hw_pre_family"].copy(),
            "source": self.make_source_adminuser()
        }
        registration_data["data"]["preg_week"] = 15
        registration = Registration.objects.create(**registration_data)
        # Execute
        result = validate_registration.create_subscriptionrequests(
            registration)
        # Check
        self.assertEqual(result, "2 SubscriptionRequests created")
        d_mom = SubscriptionRequest.objects.get(
            identity="mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.identity,
                         "mother00-9d89-4aa6-99ff-13c225365b5d")
        self.assertEqual(d_mom.messageset, 1)
        self.assertEqual(d_mom.next_sequence_number, 15)
        self.assertEqual(d_mom.lang, "eng_NG")
        self.assertEqual(d_mom.schedule, 1)
        d_family = SubscriptionRequest.objects.get(
            identity="family00-73a2-4d89-b045-d52004c025fe")
        self.assertEqual(d_family.identity,
                         "family00-73a2-4d89-b045-d52004c025fe")
        self.assertEqual(d_family.messageset, 3)
        self.assertEqual(d_family.next_sequence_number, 5)
        self.assertEqual(d_family.lang, "eng_NG")
        self.assertEqual(d_family.schedule, 3)

    @responses.activate
    def test_mother_and_father_prebirth_sms(self):
        # NOTE(review): this test is truncated in the recovered source;
        # it continues past the end of this chunk.
        # Setup
        # mock mother messageset lookup
        query_string = '?short_name=prebirth.mother.text.10_42'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 1,
                    "short_name": 'prebirth.mother.text.10_42',
                    "default_schedule": 1
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock household messageset lookup
        query_string = '?short_name=prebirth.household.audio.10_42.fri.9_11'
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/messageset/%s' % query_string,
            json={
                "next": None, "previous": None,
                "results": [{
                    "id": 3,
                    "short_name": 'prebirth.household.audio.10_42.fri.9_11',
                    "default_schedule": 3
                }]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/1/',
            json={"id": 1, "day_of_week": "1,3,5"},
            status=200, content_type='application/json',
        )
        # mock household schedule lookup
        responses.add(
            responses.GET,
            'http://localhost:8005/api/v1/schedule/3/',
            json={"id": 3, "day_of_week": "5"},
            status=200, content_type='application/json',
        )
        # mock mother MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/mother00-9d89-4aa6-99ff-13c225365b5d/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234123"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock father MSISDN lookup
        responses.add(
            responses.GET,
            'http://localhost:8001/api/v1/identities/father00-73a2-4d89-b045-d52004c025fe/addresses/msisdn?default=True',  # noqa
            json={
                "next": None, "previous": None,
                "results": [{"address": "+234124"}]
            },
            status=200, content_type='application/json',
            match_querystring=True
        )
        # mock mother SMS send
        responses.add(
            responses.POST,
            'http://localhost:8006/api/v1/outbound/',
            json={"id":
        # NOTE(review): this chunk opens mid-method (the ``def`` line is
        # outside the recovered source) and the last method is truncated.
        # All methods below are auto-generated NETCONF <config> builders:
        # each builds an ElementTree, pops its leaf value(s) from kwargs,
        # and hands the tree to ``callback`` (default ``self._callback``).
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        cos = ET.SubElement(qos, "cos")
        cos.text = kwargs.pop('cos')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_trust_trust_cos(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        trust = ET.SubElement(qos, "trust")
        # presence-only leaf: no .text is assigned
        trust_cos = ET.SubElement(trust, "trust-cos")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_cos_mutation(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        cos_mutation = ET.SubElement(qos, "cos-mutation")
        cos_mutation.text = kwargs.pop('cos_mutation')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_cos_traffic_class(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        cos_traffic_class = ET.SubElement(qos, "cos-traffic-class")
        cos_traffic_class.text = kwargs.pop('cos_traffic_class')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_tx(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        flowcontrol = ET.SubElement(qos, "flowcontrol")
        flowcontrolglobal = ET.SubElement(flowcontrol, "flowcontrolglobal")
        tx = ET.SubElement(flowcontrolglobal, "tx")
        tx.text = kwargs.pop('tx')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_rx(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        flowcontrol = ET.SubElement(qos, "flowcontrol")
        flowcontrolglobal = ET.SubElement(flowcontrol, "flowcontrolglobal")
        rx = ET.SubElement(flowcontrolglobal, "rx")
        rx.text = kwargs.pop('rx')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_cos(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        flowcontrol = ET.SubElement(qos, "flowcontrol")
        pfc = ET.SubElement(flowcontrol, "pfc")
        pfc_cos = ET.SubElement(pfc, "pfc-cos")
        pfc_cos.text = kwargs.pop('pfc_cos')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_tx(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        flowcontrol = ET.SubElement(qos, "flowcontrol")
        pfc = ET.SubElement(flowcontrol, "pfc")
        # pfc-cos is the list key for the pfc entry being addressed
        pfc_cos_key = ET.SubElement(pfc, "pfc-cos")
        pfc_cos_key.text = kwargs.pop('pfc_cos')
        pfc_tx = ET.SubElement(pfc, "pfc-tx")
        pfc_tx.text = kwargs.pop('pfc_tx')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_rx(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        qos_profile = ET.SubElement(port_profile, "qos-profile")
        qos = ET.SubElement(qos_profile, "qos")
        flowcontrol = ET.SubElement(qos, "flowcontrol")
        pfc = ET.SubElement(flowcontrol, "pfc")
        pfc_cos_key = ET.SubElement(pfc, "pfc-cos")
        pfc_cos_key.text = kwargs.pop('pfc_cos')
        pfc_rx = ET.SubElement(pfc, "pfc-rx")
        pfc_rx.text = kwargs.pop('pfc_rx')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_security_profile_mac_access_group_access_group_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        security_profile = ET.SubElement(port_profile, "security-profile")
        mac = ET.SubElement(security_profile, "mac")
        access_group = ET.SubElement(mac, "access-group")
        access_group_name = ET.SubElement(access_group, "access-group-name")
        access_group_name.text = kwargs.pop('access_group_name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_security_profile_mac_access_group_in_cg(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        security_profile = ET.SubElement(port_profile, "security-profile")
        mac = ET.SubElement(security_profile, "mac")
        access_group = ET.SubElement(mac, "access-group")
        # presence-only leaf ("in" is a Python keyword, hence the _cg suffix)
        in_cg = ET.SubElement(access_group, "in")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_security_profile_ip_access_group_ipv4_access_group_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        security_profile = ET.SubElement(port_profile, "security-profile")
        ip = ET.SubElement(security_profile, "ip")
        access_group = ET.SubElement(ip, "access-group")
        ipv4_access_group_name = ET.SubElement(access_group, "ipv4-access-group-name")
        ipv4_access_group_name.text = kwargs.pop('ipv4_access_group_name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_security_profile_ip_access_group_ipv4_in(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        security_profile = ET.SubElement(port_profile, "security-profile")
        ip = ET.SubElement(security_profile, "ip")
        access_group = ET.SubElement(ip, "access-group")
        # presence-only leaf
        ipv4_in = ET.SubElement(access_group, "ipv4-in")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_security_profile_ipv6_access_group_ipv6_access_group_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        security_profile = ET.SubElement(port_profile, "security-profile")
        ipv6 = ET.SubElement(security_profile, "ipv6")
        access_group = ET.SubElement(ipv6, "access-group")
        ipv6_access_group_name = ET.SubElement(access_group, "ipv6-access-group-name")
        ipv6_access_group_name.text = kwargs.pop('ipv6_access_group_name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_security_profile_ipv6_access_group_ipv6_in(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        security_profile = ET.SubElement(port_profile, "security-profile")
        ipv6 = ET.SubElement(security_profile, "ipv6")
        access_group = ET.SubElement(ipv6, "access-group")
        # presence-only leaf
        ipv6_in = ET.SubElement(access_group, "ipv6-in")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_restrict_flooding_container_restrict_flooding(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        restrict_flooding_container = ET.SubElement(port_profile, "restrict-flooding-container")
        # presence-only leaf
        restrict_flooding = ET.SubElement(restrict_flooding_container, "restrict-flooding")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_global_port_profile_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile_global = ET.SubElement(config, "port-profile-global", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        port_profile = ET.SubElement(port_profile_global, "port-profile")
        name = ET.SubElement(port_profile, "name")
        name.text = kwargs.pop('name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_global_port_profile_activate(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile_global = ET.SubElement(config, "port-profile-global", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        port_profile = ET.SubElement(port_profile_global, "port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        # presence-only leaf
        activate = ET.SubElement(port_profile, "activate")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_global_port_profile_static_mac_address(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile_global = ET.SubElement(config, "port-profile-global", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        port_profile = ET.SubElement(port_profile_global, "port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        static = ET.SubElement(port_profile, "static")
        mac_address = ET.SubElement(static, "mac-address")
        mac_address.text = kwargs.pop('mac_address')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_domain_port_profile_domain_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile_domain = ET.SubElement(config, "port-profile-domain", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        port_profile_domain_name = ET.SubElement(port_profile_domain, "port-profile-domain-name")
        port_profile_domain_name.text = kwargs.pop('port_profile_domain_name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_domain_profile_profile_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile_domain = ET.SubElement(config, "port-profile-domain", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        port_profile_domain_name_key = ET.SubElement(port_profile_domain, "port-profile-domain-name")
        port_profile_domain_name_key.text = kwargs.pop('port_profile_domain_name')
        profile = ET.SubElement(port_profile_domain, "profile")
        profile_name = ET.SubElement(profile, "profile-name")
        profile_name.text = kwargs.pop('profile_name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name = ET.SubElement(port_profile, "name")
        name.text = kwargs.pop('name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_allow_nonprofiledmacs(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        allow = ET.SubElement(port_profile, "allow")
        # presence-only leaf
        nonprofiledmacs = ET.SubElement(allow, "nonprofiledmacs")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_vlan_profile_switchport_basic_basic(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        vlan_profile = ET.SubElement(port_profile, "vlan-profile")
        switchport_basic = ET.SubElement(vlan_profile, "switchport-basic")
        # presence-only leaf
        basic = ET.SubElement(switchport_basic, "basic")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_vlan_profile_switchport_mode_vlan_mode(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        vlan_profile = ET.SubElement(port_profile, "vlan-profile")
        switchport = ET.SubElement(vlan_profile, "switchport")
        mode = ET.SubElement(switchport, "mode")
        vlan_mode = ET.SubElement(mode, "vlan-mode")
        vlan_mode.text = kwargs.pop('vlan_mode')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_vlan_profile_switchport_access_vlan_name(self, **kwargs):
        """Auto Generated Code
        """
        # BUG(review): kwargs.pop('name') is called twice below — the
        # second pop raises KeyError because the first already removed
        # the key.  The generator collapsed the profile-name key and the
        # vlan-name leaf onto the same kwarg; left as-is because the
        # intended kwarg name for the vlan leaf cannot be inferred here.
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        vlan_profile = ET.SubElement(port_profile, "vlan-profile")
        switchport = ET.SubElement(vlan_profile, "switchport")
        access = ET.SubElement(switchport, "access")
        vlan = ET.SubElement(access, "vlan")
        name = ET.SubElement(vlan, "name")
        name.text = kwargs.pop('name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_vlan_id(self, **kwargs):
        """Auto Generated Code
        """
        # NOTE(review): this method is truncated in the recovered source;
        # it continues past the end of this chunk.
        config = ET.Element("config")
        port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
        name_key = ET.SubElement(port_profile, "name")
        name_key.text = kwargs.pop('name')
        vlan_profile = 
ET.SubElement(port_profile, "vlan-profile") switchport = ET.SubElement(vlan_profile, "switchport") access_mac_vlan_classification = ET.SubElement(switchport, "access-mac-vlan-classification") access = ET.SubElement(access_mac_vlan_classification, "access") vlan = ET.SubElement(access, "vlan") access_mac_address_key = ET.SubElement(vlan, "access-mac-address") access_mac_address_key.text = kwargs.pop('access_mac_address') access_vlan_id = ET.SubElement(vlan, "access-vlan-id") access_vlan_id.text = kwargs.pop('access_vlan_id') callback = kwargs.pop('callback', self._callback) return callback(config) def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile") name_key = ET.SubElement(port_profile, "name") name_key.text = kwargs.pop('name') vlan_profile = ET.SubElement(port_profile, "vlan-profile") switchport = ET.SubElement(vlan_profile, "switchport") access_mac_vlan_classification = ET.SubElement(switchport, "access-mac-vlan-classification") access = ET.SubElement(access_mac_vlan_classification, "access") vlan = ET.SubElement(access, "vlan") access_vlan_id_key = ET.SubElement(vlan, "access-vlan-id") access_vlan_id_key.text = kwargs.pop('access_vlan_id') access_mac_address = ET.SubElement(vlan, "access-mac-address") access_mac_address.text = kwargs.pop('access_mac_address') callback = kwargs.pop('callback', self._callback) return callback(config) def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_vlan_id(self, **kwargs): """Auto Generated Code """
<gh_stars>1000+ """ Documents: * libwx source code: see fib.c source code * "Microsoft Word 97 Binary File Format" http://bio.gsi.de/DOCS/AIX/wword8.html Microsoft Word 97 (aka Version 8) for Windows and Macintosh. From the Office book, found in the Microsoft Office Development section in the MSDN Online Library. HTMLified June 1998. Revised Aug 1 1998, added missing Definitions section. Revised Dec 21 1998, added missing Document Properties (section). """ from hachoir_core.field import (FieldSet, Enum, Bit, Bits, UInt8, Int16, UInt16, UInt32, Int32, NullBytes, Bytes, RawBytes, PascalString8, PascalString16, CString, String, TimestampMac32, TimestampWin64) from hachoir_core.text_handler import displayHandler from hachoir_core.endian import LITTLE_ENDIAN from hachoir_parser import guessParser from hachoir_parser.misc.ole2_util import OLE2FragmentParser from hachoir_parser.common.win32_lang_id import LANGUAGE_ID CREATOR_ID={0x6A62: "Microsoft Word"} class ShortArray(FieldSet): def createFields(self): yield UInt16(self, "csw", "Count of fields in the array of shorts") self._size = self['csw'].value*16+16 yield Enum(UInt16(self, "wMagicCreated", "File creator ID"), CREATOR_ID) yield Enum(UInt16(self, "wMagicRevised", "File last modifier ID"), CREATOR_ID) yield UInt16(self, "wMagicCreatePrivate") yield UInt16(self, "wMagicCreatedPrivate") yield UInt16(self, "pnFbpChpFirst_W6") yield UInt16(self, "pnChpFirst_W6") yield UInt16(self, "cpnBteChp_W6") yield UInt16(self, "pnFbpPapFirst_W6") yield UInt16(self, "pnPapFirst_W6") yield UInt16(self, "cpnBtePap_W6") yield UInt16(self, "pnFbpLvcFirst_W6") yield UInt16(self, "pnLvcFirst_W6") yield UInt16(self, "cpnBteLvc_W6") yield Enum(UInt16(self, "lidFE", "Language ID if a Far East version of Word was used"), LANGUAGE_ID) while self.current_size < self.size: yield Int16(self, "unknown[]") def buildDateHandler(v): md,y=divmod(v,100) m,d=divmod(md,100) if y < 60: y=2000+y else: y=1900+y return "%04i-%02i-%02i"%(y,m,d) class 
LongArray(FieldSet): def createFields(self): yield UInt16(self, "clw", "Count of fields in the array of longs") self._size = self['clw'].value*32+16 yield Int32(self, "cbMax", "Stream offset of last byte + 1") yield displayHandler(UInt32(self, "lProductCreated", "Date when the creator program was built"),buildDateHandler) yield displayHandler(UInt32(self, "lProductRevised", "Date when the last modifier program was built"),buildDateHandler) yield UInt32(self, "ccpText", "Length of main document text stream") yield Int32(self, "ccpFtn", "Length of footnote subdocument text stream") yield Int32(self, "ccpHdr", "Length of header subdocument text stream") yield Int32(self, "ccpMcr", "Length of macro subdocument text stream") yield Int32(self, "ccpAtn", "Length of annotation subdocument text stream") yield Int32(self, "ccpEdn", "Length of endnote subdocument text stream") yield Int32(self, "ccpTxbx", "Length of textbox subdocument text stream") yield Int32(self, "ccpHdrTxbx", "Length of header textbox subdocument text stream") yield Int32(self, "pnFbpChpFirst", "Start of CHPX (Character Property) sector chain (sector = 512-byte 'page')") yield Int32(self, "pnChpFirst", "First CHPX sector") yield Int32(self, "cpnBteChp", "Number of CHPX sectors in the file") yield Int32(self, "pnFbpPapFirst", "Start of PAPX (Paragraph Property) sector chain") yield Int32(self, "pnPapFirst", "First PAPX sector") yield Int32(self, "cpnBtePap", "Number of PAPX sectors in the file") yield Int32(self, "pnFbpLvcFirst", "Start of LVC sector chain") yield Int32(self, "pnLvcFirst", "First LVC sector") yield Int32(self, "cpnBteLvc", "Number of LVC sectors in the file") yield Int32(self, "fcIslandFirst") yield Int32(self, "fcIslandLim") while self.current_size < self.size: yield Int32(self, "unknown[]") class FCLCB(FieldSet): static_size=64 def createFields(self): yield Int32(self, "fc", "Table Stream Offset") yield UInt32(self, "lcb", "Byte Count") def createValue(self): return 
(self['fc'].value,self['lcb'].value) class FCLCBArray(FieldSet): def createFields(self): yield UInt16(self, "cfclcb", "Count of fields in the array of FC/LCB pairs") self._size = self['cfclcb'].value*64+16 yield FCLCB(self, "StshfOrig", "Original STSH allocation") yield FCLCB(self, "Stshf", "Current STSH allocation") yield FCLCB(self, "PlcffndRef", "Footnote reference (FRD) PLC") yield FCLCB(self, "PlcffndTxt", "Footnote text PLC") yield FCLCB(self, "PlcfandRef", "Annotation reference (ATRD) PLC") yield FCLCB(self, "PlcfandTxt", "Annotation text PLC") yield FCLCB(self, "Plcfsed", "Section descriptor (SED) PLC") yield FCLCB(self, "Plcpad", "No longer used; used to be Plcfpgd (Page descriptor PLC)") yield FCLCB(self, "Plcfphe", "Paragraph heights (PHE) PLC (only for Complex files)") yield FCLCB(self, "Sttbfglsy", "Glossary string table") yield FCLCB(self, "Plcfglsy", "Glossary PLC") yield FCLCB(self, "Plcfhdd", "Header (HDD) PLC") yield FCLCB(self, "PlcfbteChpx", "Character property bin table PLC") yield FCLCB(self, "PlcfbtePapx", "Paragraph property bin table PLC") yield FCLCB(self, "Plcfsea", "Private Use PLC") yield FCLCB(self, "Sttbfffn", "Font information STTB") yield FCLCB(self, "PlcffldMom", "Main document field position (FLD) PLC") yield FCLCB(self, "PlcffldHdr", "Header subdocument field position (FLD) PLC") yield FCLCB(self, "PlcffldFtn", "Footnote subdocument field position (FLD) PLC") yield FCLCB(self, "PlcffldAtn", "Annotation subdocument field position (FLD) PLC") yield FCLCB(self, "PlcffldMcr", "No longer used") yield FCLCB(self, "Sttbfbkmk", "Bookmark names STTB") yield FCLCB(self, "Plcfbkf", "Bookmark begin position (BKF) PLC") yield FCLCB(self, "Plcfbkl", "Bookmark end position (BKL) PLC") yield FCLCB(self, "Cmds", "Macro commands") yield FCLCB(self, "Plcmcr", "No longer used") yield FCLCB(self, "Sttbfmcr", "No longer used") yield FCLCB(self, "PrDrvr", "Printer Driver information") yield FCLCB(self, "PrEnvPort", "Printer environment for Portrait 
mode") yield FCLCB(self, "PrEnvLand", "Printer environment for Landscape mode") yield FCLCB(self, "Wss", "Window Save State") yield FCLCB(self, "Dop", "Document Property data") yield FCLCB(self, "SttbfAssoc", "Associated strings STTB") yield FCLCB(self, "Clx", "Complex file information") yield FCLCB(self, "PlcfpgdFtn", "Not used") yield FCLCB(self, "AutosaveSource", "Original filename for Autosave purposes") yield FCLCB(self, "GrpXstAtnOwners", "String Group for Annotation Owner Names") yield FCLCB(self, "SttbfAtnbkmk", "Annotation subdocument bookmark names STTB") yield FCLCB(self, "PlcdoaMom", "No longer used") yield FCLCB(self, "PlcdoaHdr", "No longer used") yield FCLCB(self, "PlcspaMom", "Main document File Shape (FSPA) PLC") yield FCLCB(self, "PlcspaHdr", "Header subdocument FSPA PLC") yield FCLCB(self, "PlcfAtnbkf", "Annotation subdocument bookmark begin position (BKF) PLC") yield FCLCB(self, "PlcfAtnbkl", "Annotation subdocument bookmark end position (BKL) PLC") yield FCLCB(self, "Pms", "Print Merge State") yield FCLCB(self, "FormFldSttbs", "Form field values STTB") yield FCLCB(self, "PlcfendRef", "Endnote Reference (FRD) PLC") yield FCLCB(self, "PlcfendTxt", "Endnote Text PLC") yield FCLCB(self, "PlcffldEdn", "Endnote subdocument field position (FLD) PLC)") yield FCLCB(self, "PlcfpgdEdn", "not used") yield FCLCB(self, "DggInfo", "Office Art Object Table Data") yield FCLCB(self, "SttbfRMark", "Editor Author Abbreviations STTB") yield FCLCB(self, "SttbCaption", "Caption Title STTB") yield FCLCB(self, "SttbAutoCaption", "Auto Caption Title STTB") yield FCLCB(self, "Plcfwkb", "WKB PLC") yield FCLCB(self, "Plcfspl", "Spell Check State PLC") yield FCLCB(self, "PlcftxbxTxt", "Text Box Text PLC") yield FCLCB(self, "PlcffldTxbx", "Text Box Reference (FLD) PLC") yield FCLCB(self, "PlcfhdrtxbxTxt", "Header Text Box Text PLC") yield FCLCB(self, "PlcffldHdrTxbx", "Header Text Box Reference (FLD) PLC") yield FCLCB(self, "StwUser", "Macro User storage") yield FCLCB(self, 
"Sttbttmbd", "Embedded TrueType Font Data") yield FCLCB(self, "Unused") yield FCLCB(self, "PgdMother", "Main text page descriptors PLF") yield FCLCB(self, "BkdMother", "Main text break descriptors PLF") yield FCLCB(self, "PgdFtn", "Footnote text page descriptors PLF") yield FCLCB(self, "BkdFtn", "Footnote text break descriptors PLF") yield FCLCB(self, "PgdEdn", "Endnote text page descriptors PLF") yield FCLCB(self, "BkdEdn", "Endnote text break descriptors PLF") yield FCLCB(self, "SttbfIntlFld", "Field keywords STTB") yield FCLCB(self, "RouteSlip", "Mailer Routing Slip") yield FCLCB(self, "SttbSavedBy", "STTB of names of users who have saved the document") yield FCLCB(self, "SttbFnm", "STTB of filenames of documents referenced by this one") yield FCLCB(self, "PlcfLst", "List Format information PLC") yield FCLCB(self, "PlfLfo", "List Format Override information PLC") yield FCLCB(self, "PlcftxbxBkd", "Main document textbox break table (BKD) PLC") yield FCLCB(self, "PlcftxbxHdrBkd", "Header subdocument textbox break table (BKD) PLC") yield FCLCB(self, "DocUndo", "Undo/Versioning data") yield FCLCB(self, "Rgbuse", "Undo/Versioning data") yield FCLCB(self, "Usp", "Undo/Versioning data") yield FCLCB(self, "Uskf", "Undo/Versioning data") yield FCLCB(self, "PlcupcRgbuse", "Undo/Versioning data") yield FCLCB(self, "PlcupcUsp", "Undo/Versioning data") yield FCLCB(self, "SttbGlsyStyle", "Glossary entry style names STTB") yield FCLCB(self, "Plgosl", "Grammar options PL") yield FCLCB(self, "Plcocx", "OCX data PLC") yield FCLCB(self, "PlcfbteLvc", "Character property bin table PLC") if self['../fMac'].value: yield TimestampMac32(self, "ftModified", "Date last modified") yield Int32(self, "padding[]") else: yield TimestampWin64(self, "ftModified", "Date last modified") yield FCLCB(self, "Plcflvc", "LVC PLC") yield FCLCB(self, "Plcasumy", "Autosummary PLC") yield FCLCB(self, "Plcfgram", "Grammar check PLC") yield FCLCB(self, "SttbListNames", "List names STTB") yield FCLCB(self, 
"SttbfUssr", "Undo/Versioning data") while self.current_size < self.size: yield FCLCB(self, "unknown[]") class FIB(FieldSet): def createFields(self): yield UInt16(self, "wIdent", "Magic Number") yield UInt16(self, "nFib", "File Information Block (FIB) Version") yield UInt16(self, "nProduct", "Product Version") yield Enum(UInt16(self, "lid", "Language ID"), LANGUAGE_ID) yield Int16(self, "pnNext") yield Bit(self, "fDot", "Is the document a document template?") yield Bit(self, "fGlsy", "Is the document a glossary?") yield Bit(self, "fComplex", "Is the document in Complex format?") yield Bit(self, "fHasPic", "Does the document have embedded images?") yield Bits(self, "cQuickSaves", 4, "Number of times the document was quick-saved") yield Bit(self, "fEncrypted", "Is the document encrypted?") yield Bits(self, "fWhichTblStm", 1, "Which table stream (0Table or 1Table) to use") yield Bit(self, "fReadOnlyRecommended", "Should the file be opened read-only?") yield Bit(self, "fWriteReservation", "Is the file write-reserved?") yield Bit(self, "fExtChar", "Does the file use an extended character set?") yield Bit(self, "fLoadOverride") yield Bit(self, "fFarEast") yield Bit(self, "fCrypto") yield UInt16(self, "nFibBack", "Document is backwards compatible down to this FIB version") yield UInt32(self, "lKey", "File encryption key (only if fEncrypted)") yield Enum(UInt8(self,
# -*- coding: utf-8; py-indent-offset: 2 -*- """ This module provides tools for examining a set of vectors and find the geometry that best fits from a set of built in shapes. """ from __future__ import absolute_import, division, print_function from scitbx.matrix import col from collections import OrderedDict try: from collections.abc import Iterable except ImportError: from collections import Iterable from math import sqrt from six.moves import zip def _bond_angles(vectors): """ Creates a list of angles (In degrees) between all two-element combinations in vectors. Parameters ---------- vectors : scitbx.matrix.col Returns ------- list of float """ return [(v1, v2, v1.angle(v2, deg=True)) for index, v1 in enumerate(vectors) for v2 in vectors[index + 1:]] def _is_tetrahedron(vectors, dev_cutoff=20): """ Tetrahedrons have four vertices, with angles between all pairs of vertices uniformly about 104.5 degrees. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) > 4 or len(vectors) < 3: return angles = _bond_angles(vectors) deviation = sqrt(sum(abs(i[2] - 104.5) ** 2 for i in angles) / len(vectors)) if deviation <= dev_cutoff: return deviation, 4 - len(vectors) def _is_trigonal_plane(vectors, dev_cutoff=20): """ Triangular planar geometry has three vertices (By definition all on the same equatorial plane). The expected angles are 120 degrees between neighboring vertices. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) != 3: return angles = _bond_angles(vectors) a_120s = [] for angle in angles: a_120s.append(angle[2] - 120) deviation = sqrt(sum(i ** 2 for i in a_120s) / len(angles)) if deviation <= dev_cutoff: return deviation, 3 - len(vectors) def _is_square_plane(vectors, dev_cutoff=20): """ Square planar geometry has four vertices, all on the same equatorial plane. 
The expected angles are 90 degrees between neighboring vertices and 180 degrees between vertices across from one another. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) != 4: return angles = _bond_angles(vectors) # Expect 2x 180 degrees and 4x 90 degrees a_90s = [] a_180s = [] for angle in angles: if abs(angle[2] - 90) < abs(angle[2] - 180): a_90s.append(angle[2] - 90) else: a_180s.append(angle[2] - 180) # With up to one atom missing, we must have 2 to 4 90 degree angles and 1 to 2 # 180 degree angles if len(a_90s) < 2 or len(a_90s) > 4 or len(a_180s) < 1 or len(a_180s) > 2: return deviation = sqrt(sum(i ** 2 for i in a_90s + a_180s) / len(angles)) if deviation <= dev_cutoff: return deviation, 4 - len(vectors) def _is_square_pyramid(vectors, dev_cutoff=20): """ Square bipyramids have five vertices, four on the same equatorial plane with one above. The expected angles are all either 90 degrees or 180 degrees. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) != 5: return angles = _bond_angles(vectors) a_90s, a_180s = [], [] for angle in angles: if abs(angle[2] - 90) < abs(angle[2] - 180): a_90s.append(angle[2] - 90) else: a_180s.append(angle[2] - 180) if len(a_90s) != 8 or len(a_180s) != 2: return deviation = sqrt(sum(i ** 2 for i in a_90s + a_180s) / len(angles)) if deviation <= dev_cutoff: return deviation, 5 - len(vectors) def _is_octahedron(vectors, dev_cutoff=20): """ Octahedrons have six vertices (Their name comes from their eight faces). The expected angles are all either 90 degrees (Next to each other), or 180 degrees (Across from each other). Another name for this shape is square bipyramidal. 
Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) != 6: return angles = _bond_angles(vectors) a_90s, a_180s = [], [] for angle in angles: if abs(angle[-1] - 90) < abs(angle[-1] - 180): a_90s.append(angle[-1] - 90) else: a_180s.append(angle[-1] - 180) if len(a_180s) > 3 or len(a_180s) < 2 or len(a_90s) < 8 or len(a_90s) > 12: return deviation = sqrt(sum(i ** 2 for i in a_90s + a_180s) / len(angles)) if deviation <= dev_cutoff: return deviation, 6 - len(vectors) def _is_trigonal_pyramid(vectors, dev_cutoff=15): """ Trigional pyramids have four vertices. Three vertices form a plane with angles of 120 degrees between each pair. The last vertex resides axial to the plane, at 90 degrees from all of the equatorial vertices. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) != 4: return angles = _bond_angles(vectors) a_90s, a_120s = [], [] for angle in angles: if abs(angle[2] - 90) < abs(angle[2] - 120): a_90s.append(angle[2] - 90) else: a_120s.append(angle[2] - 120) if len(a_90s) < 2 or len(a_90s) > 4 or len(a_120s) < 2 or len(a_120s) > 4: return deviation = sqrt(sum(i ** 2 for i in a_90s + a_120s) / len(angles)) if deviation <= dev_cutoff: return deviation, 4 - len(vectors) def _is_trigonal_bipyramid(vectors, dev_cutoff=15): """ Trigonal bipyramids have five vertices. Three vertices form a plane in the middle and the angles between all three are 120 degrees. The two other vertices reside axial to the plane, at 90 degrees from all the equatorial vertices. 
Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) > 5 or len(vectors) < 4: return angles = _bond_angles(vectors) # Grab the two axial vectors ax1, ax2, axial_angle = max(angles, key=lambda x: abs(x[-1])) if axial_angle < 150: # Missing one of the two axial vectors, just quit return base_to_axials = [] equatorial_angles = [] for v1, v2, angle in angles: # Python has no boolean xor! # Grab the angles between the two endpoints of the bipyramid and the base if (v1 in [ax1, ax2]) != (v2 in [ax1, ax2]): base_to_axials += angle, elif (v1 not in [ax1, ax2]) and (v2 not in [ax1, ax2]): equatorial_angles += angle, deviants = [axial_angle - 180] deviants += [i - 90 for i in base_to_axials] deviants += [i - 120 for i in equatorial_angles] deviation = sqrt(sum(i ** 2 for i in deviants) / len(deviants)) if deviation <= dev_cutoff: return deviation, 5 - len(vectors) def _is_pentagonal_bipyramid(vectors, dev_cutoff=15): """ Pentagonal bipyramids have seven vertices. Five vertices form a plane in the middle and the angles between all five are 72 degrees. The two other vertices reside axial to the plane, at 90 degrees from all the equatorial vertices. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) > 7 or len(vectors) < 6: return angles = _bond_angles(vectors) # Determine which two vectors define the axial angles axials = [] for v1 in vectors: v_angles = [] for v2 in vectors: if v2 != v1: v_angles.append(v1.angle(v2, deg=True)) a_180s = len([i for i in v_angles if abs(i - 180) < 20]) a_90s = len([i for i in v_angles if abs(i - 90) < 20]) if a_180s > 0 and a_90s > 4: axials.append(v1) if len(axials) != 2: # Couldn't determine axial angles return ax1, ax2 = axials axial_angle = ax1.angle(ax2, deg=True) base_to_axials = [] equatorial_angles = [] for v1, v2, angle in angles: # Python has no boolean xor! 
# Grab the angles between the two endpoints of the bipyramid and the base if (v1 in [ax1, ax2]) != (v2 in [ax1, ax2]): base_to_axials += angle, elif (v1 not in [ax1, ax2]) and (v2 not in [ax1, ax2]): equatorial_angles += angle, deviants = [axial_angle - 180] deviants += [i - 90 for i in base_to_axials] deviants += [min(abs(i - 72), abs(i - 144)) for i in equatorial_angles] deviation = sqrt(sum(i ** 2 for i in deviants) / len(deviants)) if deviation <= dev_cutoff: return deviation, 7 - len(vectors) def _is_trigonal_prism(vectors, dev_cutoff=15): """ Triangular prisms are defined by 3 vertices in a triangular pattern on two aligned planes. Unfortunately, the angles are dependent on the length and width of the prism. Need more examples to come up with a better way of detecting this shape. For now, this code is experimental. Parameters ---------- vectors : list scitbx.matrix.col dev_cutoff : float, optional Returns ------- bool """ if len(vectors) != 6: return angles = _bond_angles(vectors) a_85s, a_135s = [],
<reponame>caldarolamartin/hyperion """ ========================== ANC350 Attocube Instrument ========================== This is the instrument level of the position ANC350 from Attocube (in the Montana) """ from hyperion import logging import yaml #for the configuration file import os #for playing with files in operation system import time import numpy as np import matplotlib.pyplot as plt from hyperion import root_dir from hyperion.instrument.base_instrument import BaseInstrument from hyperion import ur class Anc350Instrument(BaseInstrument): """ Anc 350 instrument class. """ def __init__(self, settings): """ init of the class""" super().__init__(settings) self.logger = logging.getLogger(__name__) self.attocube_piezo_dict = {} self.Amplitude = np.zeros(3) #here we will remember which amplitudes are set on the steppers self.Stepwidth = np.zeros(3) #here we will remember what the amplitudes means in terms of stepsize self.Frequency = np.zeros(3) self.Speed = np.zeros(3) if 'temperature' in settings: self.temperature = settings['temperature'] else: self.temperature = 300 self.stop = False self.logger.info('Welcome to the instrument of the Attocube') self.current_positions = {'XPiezoStepper': 'unavailable', 'YPiezoStepper': 'unavailable', 'ZPiezoStepper': 'unavailable', 'XPiezoScanner': 'unavailable','YPiezoScanner':'unavailable','ZPiezoScanner': 'unavailable'} self.initialize() def initialize(self): """ | Starts the connection to the device by initializing the controller. | Loads the axis names from the yml file. | Runs set_temperature_limits. 
""" self.logger.debug('Opening connection to anc350 and loading the axis names yml file.') self.controller.initialize() filename = os.path.join(root_dir, 'instrument', 'position', 'attocube_config.yml') with open(filename, 'r') as f: self.attocube_piezo_dict = yaml.load(f, Loader=yaml.FullLoader) self.set_temperature_limits() def set_temperature_limits(self): """The maximum voltage to put on the piezo scanners depends on the temperature in the cryostat. The user has to give that. The maximum ranges from 60V at room temperature to 140V at 4K, and everything in between is linearly interpolated. """ self.logger.debug('The given cryostat temperature is {}K'.format(self.temperature)) a = (140-60)/(4-300) b = 140 - 4*a # possible_temperatures = np.linspace(4,300,20) # limits = a*possible_temperatures + b # plt.figure() # plt.plot(possible_temperatures, limits) # plt.show() if self.temperature < 4: self.logger.warning('Trying to put a temperature below 4K') elif self.temperature < 301: self.max_dC_level = a * self.temperature + b self.max_dC_level = round(self.max_dC_level) * ur('V') self.logger.debug('max_dC_level interpolated to {}'.format(self.max_dC_level)) else: self.max_dC_level = 60 * ur('V') self.logger.debug('Maximum voltage on scanner piezo: {}'.format(self.max_dC_level)) self.max_dC_level_300K = 60 *ur('V') def get_position(self, axis): """ Asks the position from the controller level. This method is useful in higher levels where you want to display the position. :param axis: stepper axis, XPiezoStepper, YPiezoStepper, or XPiezoScanner, etc. :type axis: string """ ax = self.attocube_piezo_dict[axis]['axis'] # otherwise you keep typing this if 'Stepper' in axis: self.current_positions[axis] = round(self.controller.getPosition(ax) * ur('nm').to('mm'), 6) elif 'Scanner' in axis: self.current_positions[axis] = round(self.controller.getDcLevel(ax) * ur('mV'),6) def update_all_positions(self): """Uses self.get_position to ask the position on all axes. 
This method is useful in higher levels where you want to display the position. """ for axis in self.attocube_piezo_dict: self.get_position(axis) def configure_stepper(self, axis, amplitude, frequency): """ - Does the necessary configuration of the Stepper: - the Stepper seem to perform best when put in Amplitude Control mode, nr. 1 - loads the actor file, files are in controller folder, their names hardcoded in controller init - sets the amplitude and frequency - the amplitude influences the step width, the frequency influences the speed - also stores the current position of the axis in self.current_positions :param axis: stepper axis to be set, XPiezoStepper, YPiezoStepper or ZPiezoStepper :type axis: string :param amplitude: amplitude voltage; at room temperature you need 30V-40V, at low temperatures 40V-50V, max 60V; high amplitude is large stepwidth :type axis: pint quantity :param frequency: frequency to be set; higher means more noise but faster; between 1Hz and 2kHz :type axis: pint quantity """ ax = self.attocube_piezo_dict[axis]['axis'] # otherwise you keep typing this self.logger.debug('Loading Stepper actor file, putting amplitude and frequency') self.controller.load(ax) self.get_position(axis) self.controller.amplitudeControl(ax,1) self.logger.debug('Stepper Amplitude Control put in Amplitude mode') if 0 <= amplitude.m_as('mV') <= self.controller.max_amplitude_mV: self.logger.debug('checking if the amplitude is okay') self.controller.amplitude(ax, int(amplitude.m_as('mV'))) # put the amplitude on the controller, it needs to be an int self.Amplitude[ax] = amplitude.m_as('V') #remember that amplitude in V ampl = self.controller.getAmplitude(ax) * ur('mV') self.logger.debug('amplitude is now ' + str(ampl.to('V'))) step = self.controller.getStepwidth(ax) #huh, 0 makes no sense!!!! 
self.Stepwidth[ax] = step #remember the associated step width self.logger.debug('so the step width is ' + str(step * ur('nm'))) else: self.logger.warning('The required amplitude needs to be between 0V and 60V') return if 1 <= frequency.m_as('Hz') <= self.controller.max_frequency_Hz: self.logger.debug('checking if the frequency is okay') self.controller.frequency(ax, frequency.m_as('Hz')) #put the frequency on the controller; this needs to be an int (not float) self.Frequency[ax] = frequency.m_as('Hz') #remember that frequency speed = self.controller.getSpeed(ax) *ur('nm/s') #remember the associated speed self.Speed[ax] = speed.m_as('nm/s') self.logger.debug('frequency is ' + str(self.controller.getFrequency(ax) * ur('Hz'))) self.logger.debug('so the speed is ' + str(round(speed.to('mm/s'),4))) else: self.logger.warning('The required frequency needs to be between 1Hz and 2kHz') return def capacitance(self,axis): """Measures the capacitance of the stepper or scanner; no idea why you would want to do that. :param axis: scanner axis to be set, XPiezoScanner, YPiezoScanner, XPiezoStepper, etc. :type axis: string """ capacitance = self.controller.capMeasure(self.attocube_piezo_dict[axis]['axis']) * ur('mF') capacitance = round(capacitance.to('F'), 3) self.logger.debug(axis + ': ' + str(capacitance)) def configure_scanner(self, axis): """- Does the necessary configuration of the Scanner: - you need to set the mode to INT, not DC-IN :param axis: scanner axis to be set, XPiezoScanner, YPiezoScanner or ZPiezoScanner :type axis: string """ self.logger.debug('Putting Scanner setting in INT mode') self.controller.intEnable(self.attocube_piezo_dict[axis]['axis'],True) self.logger.debug('is the scanner on INT mode? ' + str(self.controller.getIntEnable(self.attocube_piezo_dict[axis]['axis']))) def check_if_moving(self,axis,position): """| **work in progress!** | Checks whether the piezo is actually moving. | It checks if you are not out of range, or putting a too low voltage. 
| If that's okay, it keeps checking whether you are actually moving. | However, the status of the piezo is not always correct, and the movement is not linear, so this method is not finished yet. | It also keeps checking whether self.stop is True, and asking the position. This can be used in higher levels with threads and timers. :param axis: scanner axis to be set, XPiezoStepper, YPiezoStepper or ZPiezoStepper :type axis: string :param position: absolute position that you want to go to; needs to be an integer, no float! :type axis: pint quantity :return: The end position, so the moving method can compare that with the start position """ Position = np.zeros(5) ax = self.attocube_piezo_dict[axis]['axis'] #otherwise you keep typing this start_range = ur(self.attocube_piezo_dict[axis]['start_range']) end_range = ur(self.attocube_piezo_dict[axis]['end_range']) # check what's happening current_pos = self.controller.getPosition(ax)*ur('nm') self.get_position(axis) self.logger.debug(axis + 'starts at position ' + str(round(current_pos.to('mm'),6))) ismoved = False self.logger.debug('0 < current_pos < 5mm? 
{}'.format(position < 0.0*ur('mm') or position > 5.0*ur('mm'))) # pay attention: position is what you gave to this method, the position where you want to move to # current_pos is what you asked the positioner is at now # new_pos is the position to compare the old position with, to check whether you are actually moving at all self.logger.debug('start of range: '+str(ur(self.attocube_piezo_dict[axis]['start_range']).m_as('mm'))) self.logger.debug('stop of range: ' + str(ur(self.attocube_piezo_dict[axis]['end_range']))) if position.m_as('mm') < start_range.m_as('mm') or position.m_as('mm') > end_range.m_as('mm'): self.logger.warning('Trying to move out of range') self.stop_moving(axis) #you have to do this, otherwise it keeps trying forever end = current_pos return (end, ismoved) elif self.Amplitude[ax] < 1.0: #if you forgot, it might be 0 V self.stop_moving(axis) #you have to do this, otherwise it keeps trying forever end = current_pos self.logger.warning('Maybe you should configurate this Stepper axis and set a voltage') return (end, ismoved) else: time.sleep(0.5) # important not to put too short, otherwise it already starts asking before the guy even knows whether he moves while self.controller.getStatus(ax)['moving']: #self.logger.debug('controller moving? 
'+str(self.controller.getStatus(ax)['moving'])) self.get_position(axis) self.logger.debug('{}'.format(self.current_positions[axis])) time.sleep(0.1) if self.stop: self.logger.info('Stopping approaching') self.stop = False break # time.sleep(0.5) # while self.controller.getStatus(ax)['moving']: # self.logger.debug('status of controller: ' + str(self.controller.getStatus(ax)['moving'])) # # sleeptime = 1.0 # time.sleep(sleeptime) # new_pos = self.controller.getPosition(ax)*ur('nm') # self.logger.info(axis + 'moving, currently at ' + str(round(new_pos.to('mm'), 6))) # # self.logger.debug(self.Speed[ax]*sleeptime*ur('nm')) # # # # current_pos = new_pos # ismoved = True # end = current_pos # return (end, ismoved) # time.sleep(0.5) # important not to put too short, otherwise it already starts asking before the guy even knows whether he moves # while self.controller.getStatus(ax)['moving']: # self.logger.debug('status of controller: '+str(self.controller.getStatus(ax)['moving'])) # time.sleep(1.0) #important not to put too
<filename>Text.py
from Base import Buttons
from Slider import Slider
import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = ""
import pygame


class Text(Buttons):
    """ A simple (multi-line) text object, with scrolling support.

    pos: (left, top) - The topleft position before scaling.
    size: (width, height) - The size before scaling.
    text: str - The text that will be rendered to the surface.
    style: "Square", "Round", int - Defines the radius of curvature of the buttons' corners.
    font_name: str - The name of the font that should be used for the Text. Must lead to a valid font when used in pygame.font.Font().
    font_size: int - The size (in px) of the text.
    text_colour: (R, G, B) - The colour of the text in the Text object.
    text_offset: "auto", int, (x, y) - The offset the text should have from the sides of the Text object. Prevents the text from overlapping with borders, and touching the edges.
    scroll_bar: None, int, Slider - The type of scrollbar to be included. Default styles 1 and 2 are available.
    background: pygame.Surface, (R, G, B), None, function - The background of the button. If a function is given, it will be called in Make_background as 'function(self)'.
    border: ((R, G, B), width, offset), None - The border that appears around the TextBox.
    func_data: dict - Contains potential additional data for use by custom background drawing functions.
    groups: None, [___, ___] - A list of all groups to which a button is to be added.
    independent: bool - Determines whether or not the button is allowed to set the input_lock, and is added to buttons.list_all. Mostly important for buttons which are part of another button.

    Inputs:
    *.value: str - Can be used synonymously with *.text.
    *.text: str - Allows the user to set a new value for the Text objects' displayed text.
    *.lines: tuple - Allows the user to set a new value for 'lines' (the text as it is split to fit properly accros the lines).
    *.write(value) - Appends text to self.text. Allows this button to be used as an output for e.g. the print() function.

    Outputs:
    *.value: str - Synonymous with *.text.
    *.text: str - The current text being rendered to the surface.
    *.lines: tuple - The current text being rendered to the surface, as it is split to prevent it from exceeding the Surface borders.

    NOTE(review): the docstring documents a 'groups' parameter but the signature
    takes 'group' -- confirm which name is intended.
    """
    # Event-handler names this button type responds to (dispatched by the framework).
    actions = ["Scroll", "LMB_down", "LMB_up", "Set_cursor_pos"]

    def __init__(self, pos, size,
                 text = "",
                 style = "Square",
                 font_name = pygame.font.get_default_font(),
                 font_size = 20,
                 text_colour = (0, 0, 0),
                 text_offset = "auto",
                 scroll_bar = None,
                 background = None,
                 border = None,
                 func_data = {},  # NOTE(review): mutable default argument; shared across instances if mutated
                 group = None,
                 independent = False,
                 ):
        """ Create a Text Button object. See help(type(self)) for more detailed information. """
        super().__init__(pos, size, font_name, font_size, group, independent)
        self.style = style
        self.text_colour = self.Verify_colour(text_colour)
        self.bg = self.Verify_background(background)
        self.border = self.Verify_border(border)
        #Set the offset the text has from the sides of the text_box
        if type(text_offset) is int:
            self.text_offset = 2 * (text_offset,)
        elif type(text_offset) is not str:
            self.text_offset = self.Verify_iterable(text_offset, 2)
        elif text_offset.lower() == "auto":
            #The automatic offset is calculated as 0.25 * font_size + (border_width + border_offset if there is a border)
            #Offset is not 0 if no border is given, to be consistent with TextBox Buttons
            #It can of course still be 0 if the user sets text_offset = 0
            self.text_offset = 2 * (round(self.font_size / 4) + ((border[1] + border[2]) if self.border else 0),)

        if scroll_bar:
            self.scroll_bar = Make_scroll_bar(self, scroll_bar)
            self.children.append(self.scroll_bar)
        else:
            self.scroll_bar = None
        self.text = text            # triggers the text.setter, which flags self.updated
        self.__scrolled = 0         # scroll offset in (unscaled) pixels from the top
        self.Build_lines()
        self.func_data = func_data
        self.Draw(pygame.Surface((1, 1))) #Makes sure all attributes are set-up correctly

    def LMB_down(self, pos):
        # Forward a left-mouse-down (in parent coordinates) to the scroll bar, if any.
        pos = self.relative(pos)
        if self.scroll_bar:
            self.scroll_bar.LMB_down(pos)
            if self.Buttons.input_claim:
                #If the slider contained the position, and now claimed the input, set self as the lock
                self.Set_lock()

    def LMB_up(self, pos):
        # Forward a left-mouse-up to the scroll bar and release the input lock if taken.
        pos = self.relative(pos)
        if self.scroll_bar:
            self.scroll_bar.LMB_up(pos)
            if self.Buttons.input_claim:
                self.Release_lock()

    def Set_cursor_pos(self, pos):
        # Forward cursor movement (e.g. while dragging the slider) to the scroll bar.
        pos = self.relative(pos)
        if self.scroll_bar:
            self.scroll_bar.Set_cursor_pos(pos)

    def Scroll(self, value, pos):
        # Mouse-wheel scrolling; only reacts when the cursor is inside this box.
        if not self.contains(pos): #If the mouse was not within the text box:
            return
        self.scrolled += value
        self.Buttons.input_claim = True
        self.Buttons.input_processed = True

    def Draw(self, screen):
        """ Draw the button to the screen. """
        self.scrolled #Update the scrolled position quickly (property getter syncs with the scroll bar)
        if self.updated:
            self.Build_lines()
            self.moved = True
            #Make the background surface
            self.bg_surface = self.Make_background_surface(self.bg)
            if self.border:
                self.Draw_border(self.bg_surface, *self.border)
            #Build the surface containing ALL lines of text
            font_height = self.font.get_height()
            self.text_surface = pygame.Surface(self.scaled((self.width - 2 * self.text_offset[0] - (self.scroll_bar.width if self.scroll_bar else 0), font_height * len(self.lines))), pygame.SRCALPHA)
            for line_nr, line in enumerate(self.lines):
                self.text_surface.blit(self.font.render(line, True, self.text_colour), (0, line_nr * font_height))
            self.updated = False
        if self.moved:
            #Blit the fully rendered text surface onto a limiter surface.
            text_limiter = pygame.Surface(self.scaled((self.width - 2 * self.text_offset[0] - (self.scroll_bar.width + self.text_offset[0] if self.scroll_bar else 0), self.height - 2 * self.text_offset[1])), pygame.SRCALPHA)
            text_limiter.blit(self.text_surface, (0, -self.scaled(self.scrolled)))
            #Blit the text surface onto the actual background
            self.surface = self.bg_surface.copy()
            self.surface.blit(text_limiter, self.scaled(self.text_offset))
            if self.scroll_bar:
                self.scroll_bar.Draw(self.surface)
            self.moved = False
        screen.blit(self.surface, self.scaled(self.topleft))
        return

    def write(self, value):
        """ Append value to self.text. Allows for a Text object to be used as an output "file" for e.g. print. """
        self.text += value

    @property
    def scrolled(self):
        # Pull the latest value from the scroll bar before returning the cached offset.
        if self.scroll_bar and self.scroll_bar.moved:
            self.scrolled = round(self.scroll_bar.value)
        return(self.__scrolled)

    @scrolled.setter
    def scrolled(self, value):
        text_height = self.font_size * len(self.lines) #Get the total height of all text in the text box
        #Make sure the scrolled value cannot exceed the limits of the space in the box
        value = round(self.Clamp(value, 0, max(0, text_height - self.height + 2 * self.text_offset[1])))
        self.moved = True
        #Return if the scroll value has not been updated
        if value == self.__scrolled:
            return
        self.__scrolled = value
        if self.scroll_bar:
            self.scroll_bar.value = value
        return

    @property
    def value(self):
        # Alias for self.text.
        return self.text

    @value.setter
    def value(self, val):
        self.text = val

    @property
    def text(self):
        return(self.__text)

    @text.setter
    def text(self, value):
        if type(value) is not str:
            raise TypeError(f"Text should be type str, not type {type(value).__name__}.")
        self.__text = value
        self.updated = True     # forces Build_lines + re-render on the next Draw

    @property
    def lines(self):
        return(self.__lines)

    @lines.setter
    def lines(self, value):
        """ """
        #For external use only. Internally, all writing calls are directly to self.__lines
        if type(value) not in (tuple, list,):
            raise TypeError(f"Lines must be type 'tuple' or type 'list', not type {type(value).__name__}")
        self.__lines = tuple(value)
        self.__text = "\n".join(self.__lines)
        self.updated = True

    def Build_lines(self):
        """ (Re-)builds the '*.lines' tuple based on the current value of self.text,
        such that the text will automatically wrap around to the next line if it
        won't fit on the current line anymore.
        Called automatically after *.text is set.
        """
        # Width (in scaled px) available for text, leaving room for offsets and scroll bar.
        max_width = self.scaled(self.width - 2 * self.text_offset[0] - (self.scroll_bar.width + self.text_offset[0] if self.scroll_bar else 0))
        text_lines = self.text.split("\n")
        lines = []
        for line in text_lines:
            line_string = ""
            words = line.split(" ")
            for word in words:
                if line_string == "": #If the line is still empty:
                    line_string = word #Simply place the word at the start of the string.
                elif self.font.size(" ".join([line_string, word]))[0] <= max_width: #If the next word still fits on this line:
                    line_string = " ".join([line_string, word]) #Join it together with the existing text
                else: #If the word is too long to fit on the line:
                    lines.append(line_string)
                    line_string = word #Place it on the next line.
            #Once all words are exhausted, append the remaining string to lines as well
            lines.append(line_string)
        self.__lines = tuple(lines)
        if self.scroll_bar:
            # Resize the slider handle to the visible fraction and rescale its value range.
            self.scroll_bar.Set_slider_primary(round(self.scroll_bar.height * min(1, (self.height - 2 * self.text_offset[1]) / (len(self.lines) * self.font_size))))
            self.scroll_bar.value_range = (0, max(0, len(self.lines) * self.font_size - self.height + 2 * self.text_offset[1]))
        self.scrolled += 0 #Update the 'scrolled' value, to take into account that after rebuilding, the length of 'lines' might be different


def Make_scroll_bar(self, scroll_bar):
    """ Make a scroll_bar for a Text object. For internal use only. This function is therefore also not imported by __init__.py """
    if isinstance(scroll_bar, Slider):
        scroll_bar.right = self.width - self.text_offset[0]
        # NOTE(review): 'math' is not imported in this file's visible imports -- NameError at runtime; confirm.
        scroll_bar.centery = math.floor(self.height)
        # NOTE(review): bare 'text_offset' is undefined here; presumably self.text_offset[1] was meant -- confirm.
        if scroll_bar.height > self.height - 2 * text_offset:
            scroll_bar.height = self.height - 2 * text_offset
        return(scroll_bar)
    if isinstance(scroll_bar, int) and int(scroll_bar) == 1:
        size = (15, self.height - 2 * self.text_offset[1])
        pos = (self.width -
import asyncio
import contextlib
import time
from unittest import TestCase

from aioquic import tls
from aioquic.buffer import Buffer
from aioquic.quic import events
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import (
    IFType,
    IPVersion,
    QuicConnection,
    QuicConnectionError,
    QuicReceiveContext,
)
from aioquic.quic.logger import QuicLogger
from aioquic.quic.packet import QuicErrorCode, QuicFrameType
from aioquic.quic.packet_builder import QuicDeliveryState
from aioquic.quic.recovery import QuicPacketPacer

from tests.utils import SERVER_CACERTFILE, SERVER_CERTFILE, SERVER_KEYFILE

# Fixed (address, port) pairs used for all simulated datagram exchanges.
CLIENT_ADDR = ("172.16.31.10", 1234)
SERVER_ADDR = ("192.168.127.12", 4433)


def client_receive_context(client, receiving_uniflow: int = 0, epoch=tls.Epoch.ONE_RTT):
    # Build a QuicReceiveContext as if a packet arrived on the given receiving uniflow.
    return QuicReceiveContext(
        epoch=epoch,
        host_cid=client._receiving_uniflows[receiving_uniflow].cid,
        receiving_uniflow=client._receiving_uniflows[receiving_uniflow],
        perceived_address=client._receiving_uniflows[receiving_uniflow].source_address,
        quic_logger_frames=[],
        time=asyncio.get_event_loop().time(),
    )


@contextlib.contextmanager
def client_and_server(
    # NOTE(review): mutable default arguments ({}) -- harmless as long as callers
    # never mutate them in place, but worth confirming/fixing.
    client_kwargs={},
    client_options={},
    client_patch=lambda x: None,
    handshake=True,
    server_kwargs={},
    server_certfile=SERVER_CERTFILE,
    server_keyfile=SERVER_KEYFILE,
    server_options={},
    server_patch=lambda x: None,
):
    # Context manager yielding a connected (client, server) pair with pacing and
    # ack-delay disabled so exchanges are deterministic; closes both on exit.
    client_configuration = QuicConfiguration(
        is_client=True,
        quic_logger=QuicLogger(),
        local_addresses=[
            [CLIENT_ADDR[0], IPVersion.IPV4, IFType.FIXED, CLIENT_ADDR[1]]
        ],
        **client_options,
    )
    client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE)

    client = QuicConnection(configuration=client_configuration, **client_kwargs)
    client._ack_delay = 0
    disable_packet_pacing(client)
    client_patch(client)

    server_configuration = QuicConfiguration(
        is_client=False,
        quic_logger=QuicLogger(),
        local_addresses=[
            [SERVER_ADDR[0], IPVersion.IPV4, IFType.FIXED, SERVER_ADDR[1]]
        ],
        **server_options,
    )
    server_configuration.load_cert_chain(server_certfile, server_keyfile)

    server = QuicConnection(
        configuration=server_configuration,
        original_destination_connection_id=client.original_destination_connection_id,
        **server_kwargs,
    )
    server._ack_delay = 0
    disable_packet_pacing(server)
    server_patch(server)

    # perform handshake
    if handshake:
        client.connect(SERVER_ADDR, CLIENT_ADDR, now=time.time())
        for i in range(3):  # three round trips are enough to complete the handshake
            roundtrip(client, server)

    yield client, server

    # close
    client.close()
    server.close()


def disable_packet_pacing(connection):
    # Replace every sending uniflow's pacer with one that never delays sends.
    class DummyPacketPacer(QuicPacketPacer):
        def next_send_time(self, now):
            return None

    for suniflow in connection._sending_uniflows.values():
        suniflow.loss._pacer = DummyPacketPacer()


def sequence_numbers(connection_ids):
    # Extract the sequence numbers from a list of connection-ID objects.
    return list(map(lambda x: x.sequence_number, connection_ids))


def address_ids(mp_network_addresses):
    # Extract the address IDs from a list of multipath network-address objects.
    return list(map(lambda x: x.address_id, mp_network_addresses))


def drop(sender):
    """ Drop datagrams from `sender`. """
    # Pulling the datagrams without delivering them simulates loss; returns the count.
    return len(sender.datagrams_to_send(now=time.time()))


def roundtrip(sender, receiver):
    """ Send datagrams from `sender` to `receiver` and back. """
    return transfer(sender, receiver), transfer(receiver, sender)


def transfer(sender, receiver):
    """ Send datagrams from `sender` to `receiver`. """
    datagrams = 0
    from_addr = CLIENT_ADDR if sender._is_client else SERVER_ADDR
    to_addr = SERVER_ADDR if sender._is_client else CLIENT_ADDR
    # NOTE(review): 'addr' and 'local_addr' from the sender are ignored; delivery
    # always uses the fixed module-level addresses above.
    for data, addr, local_addr in sender.datagrams_to_send(now=time.time()):
        datagrams += 1
        receiver.receive_datagram(data, from_addr, to_addr, now=time.time())
    return datagrams


class QuicMPConnectionTest(TestCase):
    def check_handshake(self, client, server, alpn_protocol=None):
        """ Check handshake completed. """
        event = client.next_event()
        self.assertEqual(type(event), events.ProtocolNegotiated)
        self.assertEqual(event.alpn_protocol, alpn_protocol)
        event = client.next_event()
        self.assertEqual(type(event), events.HandshakeCompleted)
        self.assertEqual(event.alpn_protocol, alpn_protocol)
        self.assertEqual(event.early_data_accepted, False)
        self.assertEqual(event.session_resumed, False)
        # CID_seq 0 is implicitly communicated for initial uniflow
        for i in range(7):
            self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
        # CID_seq 0 is explicitly communicated for other uniflows
        for j in range(1, len(client._receiving_uniflows)):
            for i in range(8):
                self.assertEqual(type(client.next_event()), events.MPConnectionIdIssued)
        self.assertIsNone(client.next_event())

        event = server.next_event()
        self.assertEqual(type(event), events.ProtocolNegotiated)
        self.assertEqual(event.alpn_protocol, alpn_protocol)
        event = server.next_event()
        self.assertEqual(type(event), events.HandshakeCompleted)
        self.assertEqual(event.alpn_protocol, alpn_protocol)
        # CID_seq 0 is implicitly communicated for initial uniflow
        for i in range(7):
            self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
        # CID_seq 0 is explicitly communicated for other uniflows
        for j in range(1, len(server._receiving_uniflows)):
            for i in range(8):
                self.assertEqual(type(server.next_event()), events.MPConnectionIdIssued)
        self.assertIsNone(server.next_event())

    def test_mp_connect(self):
        # Full multipath handshake, then a clean close initiated by the client.
        client_msui = 1
        server_msui = 1
        assert client_msui >= 0 and server_msui >= 0
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            # check handshake completed
            self.check_handshake(client=client, server=server)

            # check each endpoint has available connection IDs for each uniflow for the peer
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    [1, 2, 3, 4, 5, 6, 7],
                )
            for i in range(server_msui + 1):
                self.assertEqual(
                    sequence_numbers(server._sending_uniflows[i].cid_available),
                    [1, 2, 3, 4, 5, 6, 7],
                )

            # client closes the connection
            client.close()
            self.assertEqual(transfer(client, server), 1)

            # check connection closes on the client side
            client.handle_timer(client.get_timer())
            event = client.next_event()
            self.assertEqual(type(event), events.ConnectionTerminated)
            self.assertEqual(event.error_code, QuicErrorCode.NO_ERROR)
            self.assertEqual(event.frame_type, None)
            self.assertEqual(event.reason_phrase, "")
            self.assertIsNone(client.next_event())

            # check connection closes on the server side
            server.handle_timer(server.get_timer())
            event = server.next_event()
            self.assertEqual(type(event), events.ConnectionTerminated)
            self.assertEqual(event.error_code, QuicErrorCode.NO_ERROR)
            self.assertEqual(event.frame_type, None)
            self.assertEqual(event.reason_phrase, "")
            self.assertIsNone(server.next_event())

            # check client log
            client_log = client.configuration.quic_logger.to_dict()
            self.assertGreater(len(client_log["traces"][0]["events"]), 20)

            # check server log
            server_log = server.configuration.quic_logger.to_dict()
            self.assertGreater(len(server_log["traces"][0]["events"]), 20)

    def test_mp_unsupported_client(self):
        # Each multipath frame type must raise PROTOCOL_VIOLATION when multipath
        # support was not negotiated (max_sending_uniflow_id=None on the server).
        test_frames = [
            {
                "function": 0,
                "frame_type": QuicFrameType.MP_NEW_CONNECTION_ID,
                "buffer": Buffer(data=b""),
            },
            {
                "function": 1,
                "frame_type": QuicFrameType.MP_RETIRE_CONNECTION_ID,
                "buffer": Buffer(data=b""),
            },
            {
                "function": 2,
                "frame_type": QuicFrameType.MP_ACK_ECN,
                "buffer": Buffer(data=b""),
            },
            {
                "function": 3,
                "frame_type": QuicFrameType.ADD_ADDRESS,
                "buffer": Buffer(data=b""),
            },
            {
                "function": 4,
                "frame_type": QuicFrameType.REMOVE_ADDRESS,
                "buffer": Buffer(data=b""),
            },
            {
                "function": 5,
                "frame_type": QuicFrameType.UNIFLOWS,
                "buffer": Buffer(data=b""),
            },
        ]
        for item in test_frames:
            with client_and_server(
                server_options={"max_sending_uniflow_id": None},
            ) as (client, server):
                function_objects = [
                    client._handle_mp_new_connection_id_frame,
                    client._handle_mp_retire_connection_id_frame,
                    client._handle_mp_ack_frame,
                    client._handle_add_address_frame,
                    client._handle_remove_address_frame,
                    client._handle_uniflows_frame,
                ]
                with self.assertRaises(QuicConnectionError) as cm:
                    # client receives a multipath frame
                    function_objects[item["function"]](
                        client_receive_context(client),
                        int(item["frame_type"]),
                        item["buffer"],
                    )
                self.assertEqual(
                    cm.exception.error_code,
                    QuicErrorCode.PROTOCOL_VIOLATION,
                )
                self.assertEqual(cm.exception.frame_type, item["frame_type"])
                self.assertEqual(
                    cm.exception.reason_phrase,
                    "Multipath frames are not allowed, use max_sending_uniflow_id to signal Multipath support",
                )

    def test_mp_change_connection_id(self):
        # Changing the CID on one uniflow retires seq 0 and the peer replenishes.
        client_msui = 1
        server_msui = 0
        client_uniflow_id = 1
        assert client_msui >= client_uniflow_id >= 0
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    [1, 2, 3, 4, 5, 6, 7],
                )

            # the client changes connection ID
            client.change_connection_id(client_uniflow_id)
            self.assertEqual(transfer(client, server), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

            # the server provides a new connection ID
            self.assertEqual(transfer(server, client), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7, 8]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

    def test_mp_change_connection_id_retransmit_new_connection_id(self):
        # Same as above, but the server's MP_NEW_CONNECTION_ID is lost once and
        # must be retransmitted via the delivery callback.
        client_msui = 1
        server_msui = 0
        client_uniflow_id = 1
        assert client_msui >= client_uniflow_id >= 1
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    [1, 2, 3, 4, 5, 6, 7],
                )

            # the client changes connection ID
            client.change_connection_id(client_uniflow_id)
            self.assertEqual(transfer(client, server), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

            # the server provides a new connection ID, MP_NEW_CONNECTION_ID is lost
            self.assertEqual(drop(server), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

            # MP_NEW_CONNECTION_ID is retransmitted
            server._on_mp_new_connection_id_delivery(
                QuicDeliveryState.LOST,
                server._receiving_uniflows[client_uniflow_id].cid_available[-1],
                client_uniflow_id,
            )
            self.assertEqual(transfer(server, client), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7, 8]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

    def test_mp_change_connection_id_retransmit_retire_connection_id(self):
        # The client's MP_RETIRE_CONNECTION_ID is lost once and retransmitted.
        client_msui = 1
        server_msui = 0
        client_uniflow_id = 1
        assert client_msui >= client_uniflow_id >= 1
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    [1, 2, 3, 4, 5, 6, 7],
                )

            # the client changes connection ID, MP_RETIRE_CONNECTION_ID is lost
            client.change_connection_id(client_uniflow_id)
            self.assertEqual(drop(client), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

            # MP_RETIRE_CONNECTION_ID is retransmitted
            client._on_mp_retire_connection_id_delivery(
                QuicDeliveryState.LOST, 0, client_uniflow_id
            )
            self.assertEqual(transfer(client, server), 1)

            # the server provides a new connection ID
            self.assertEqual(transfer(server, client), 1)
            for i in range(client_msui + 1):
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    (
                        [2, 3, 4, 5, 6, 7, 8]
                        if i == client_uniflow_id
                        else [1, 2, 3, 4, 5, 6, 7]
                    ),
                )

    def test_mp_handle_new_connection_id_duplicate(self):
        # A frame repeating an already-known sequence number (7) must be a no-op.
        client_msui = 1
        server_msui = 0
        client_uniflow_id = 1
        assert client_msui >= client_uniflow_id >= 1
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            buf = Buffer(capacity=100)
            buf.push_uint_var(client_uniflow_id)  # uniflow_id
            buf.push_uint_var(7)  # sequence_number
            buf.push_uint_var(0)  # retire_prior_to
            buf.push_uint8(8)     # CID length
            buf.push_bytes(bytes(8))   # connection ID
            buf.push_bytes(bytes(16))  # stateless reset token
            buf.seek(0)

            # client receives NEW_CONNECTION_ID
            # NOTE(review): the sibling tests pass QuicFrameType.MP_NEW_CONNECTION_ID
            # here; NEW_CONNECTION_ID looks like a copy-over from the non-MP test -- confirm.
            client._handle_mp_new_connection_id_frame(
                client_receive_context(client),
                QuicFrameType.NEW_CONNECTION_ID,
                buf,
            )

            for i in range(client_msui + 1):
                self.assertEqual(client._sending_uniflows[i].cid.sequence_number, 0)
                self.assertEqual(
                    sequence_numbers(client._sending_uniflows[i].cid_available),
                    [1, 2, 3, 4, 5, 6, 7],
                )

    def test_mp_handle_new_connection_id_over_limit(self):
        # A ninth active CID (seq 8, nothing retired) must exceed the CID limit.
        client_msui = 1
        server_msui = 0
        client_uniflow_id = 1
        assert client_msui >= client_uniflow_id >= 1
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            buf = Buffer(capacity=100)
            buf.push_uint_var(client_uniflow_id)  # uniflow_id
            buf.push_uint_var(8)  # sequence_number
            buf.push_uint_var(0)  # retire_prior_to
            buf.push_uint8(8)     # CID length
            buf.push_bytes(bytes(8))   # connection ID
            buf.push_bytes(bytes(16))  # stateless reset token
            buf.seek(0)

            # client receives MP_NEW_CONNECTION_ID
            with self.assertRaises(QuicConnectionError) as cm:
                client._handle_mp_new_connection_id_frame(
                    client_receive_context(client),
                    QuicFrameType.MP_NEW_CONNECTION_ID,
                    buf,
                )
            self.assertEqual(
                cm.exception.error_code, QuicErrorCode.CONNECTION_ID_LIMIT_ERROR
            )
            self.assertEqual(
                cm.exception.frame_type, QuicFrameType.MP_NEW_CONNECTION_ID
            )
            self.assertEqual(
                cm.exception.reason_phrase,
                "Uniflow " + str(client_uniflow_id) + " too many active connection IDs",
            )

    def test_mp_handle_new_connection_id_with_retire_prior_to(self):
        # retire_prior_to=2 must retire seqs 0-1 and promote seq 2 as active CID.
        client_msui = 1
        server_msui = 0
        client_uniflow_id = 1
        assert client_msui >= client_uniflow_id >= 1
        with client_and_server(
            client_options={"max_sending_uniflow_id": client_msui},
            server_options={"max_sending_uniflow_id": server_msui},
        ) as (client, server):
            buf = Buffer(capacity=100)
            buf.push_uint_var(client_uniflow_id)  # uniflow_id
            buf.push_uint_var(8)  # sequence_number
            buf.push_uint_var(2)  # retire_prior_to
            buf.push_uint8(8)     # CID length
            buf.push_bytes(bytes(8))   # connection ID
            buf.push_bytes(bytes(16))  # stateless reset token
            buf.seek(0)

            # client receives MP_NEW_CONNECTION_ID
            client._handle_mp_new_connection_id_frame(
                client_receive_context(client),
                QuicFrameType.MP_NEW_CONNECTION_ID,
                buf,
            )

            for i in range(client_msui + 1):
                self.assertEqual(
                    client._sending_uniflows[i].cid.sequence_number,
                    2 if i == client_uniflow_id else 0,
                )
                self.assertEqual(
                    sequence_numbers(
                        client._sending_uniflows[client_uniflow_id].cid_available
                    ),
                    [3, 4, 5, 6, 7, 8] if i == client_uniflow_id else [1, 2, 3, 4,
        # NOTE(review): tail of the request-dispatch method; its def line is
        # outside this chunk.
        rule = req.url_rule
        # if we provide automatic options for this URL and the
        # request came with the OPTIONS method, reply automatically
        if (  # pragma: no cover
            getattr(rule, "provide_automatic_options", False)
            and req.method == "OPTIONS"
        ):
            return self.make_default_options_response()  # pragma: no cover
        # otherwise dispatch to the handler for that endpoint
        view_function = self.view_functions[rule.endpoint]
        if hasattr(self, 'ensure_sync'):  # pragma: no cover
            # Flask 2.x: wrap async view functions so they can be called synchronously
            view_function = self.ensure_sync(view_function)
        if rule.endpoint == 'static':
            # app static route only accepts keyword arguments, see flask#3762
            return view_function(**req.view_args)  # type: ignore
        else:
            # pass view args positionally so the handler signature decides the order
            return view_function(*req.view_args.values())  # type: ignore

    @staticmethod
    def _error_handler(
        error: HTTPError
    ) -> t.Union[t.Tuple[dict, int], t.Tuple[dict, int, t.Mapping[str, str]]]:
        """The default error handler.

        Builds a JSON body from the error's attributes and returns it together
        with the status code and any headers attached to the error.

        Arguments:
            error: The HTTPError being handled; carries:
                status_code: The status code of the error (4XX and 5xx).
                message: The simple description of the error. If not provided,
                    the reason phrase of the status code will be used.
                detail: The detailed information of the error, you can use it to
                    provide the addition information such as custom error code,
                    documentation URL, etc.
                headers: A dict of headers used in the error response.
        """
        body = {
            'detail': error.detail,
            'message': error.message,
            'status_code': error.status_code
        }
        return body, error.status_code, error.headers

    def error_processor(
        self,
        f: ErrorCallbackType
    ) -> ErrorCallbackType:
        """A decorator to register an error response processor function.

        The decorated callback function will be called in the following situations:

        - An validation error happened when parsing a request.
        - An exception triggered with [`HTTPError`][apiflask.exceptions.HTTPError]
        - An exception triggered with [`abort`][apiflask.exceptions.abort].

        If you have set the `json_errors` argument to `True` when creating the `app`
        instance, this callback function will also be used for normal HTTP errors,
        for example, 404 and 500 errors, etc. You can still register a specific error
        handler for a specific error code or exception with the
        `app.errorhandler(code_or_exection)` decorator, in that case, the return
        value of the error handler will be used as the response when the
        corresponding error or exception happened.

        The callback function must accept an error object as argument and return
        a valid response.

        Examples:

        ```python
        @app.error_processor
        def my_error_processor(error):
            return {
                'status_code': error.status_code,
                'message': error.message,
                'detail': error.detail
            }, error.status_code, error.headers
        ```

        The error object is an instance of
        [`HTTPError`][apiflask.exceptions.HTTPError], so you can get error
        information via it's attributes:

        - status_code: If the error triggered by validation error, the value will be
          400 (default) or the value you passed in config
          `VALIDATION_ERROR_STATUS_CODE`. If the error triggered by
          [`HTTPError`][apiflask.exceptions.HTTPError] or
          [`abort`][apiflask.exceptions.abort], it will be the status code
          you passed. Otherwise, it will be the status code set by Werkzueg when
          processing the request.
        - message: The error description for this error, either you passed or grab
          from Werkzeug.
        - detail: The detail of the error. When the validation error happened, it will
          be filled automatically in the following structure:

          ```python
          "<location>": {
              "<field_name>": ["<error_message>", ...],
              "<field_name>": ["<error_message>", ...],
              ...
          },
          "<location>": {
              ...
          },
          ...
          ```

          The value of `location` can be `json` (i.e., request body) or `query`
          (i.e., query string) depend on the place where the validation error
          happened.
        - headers: The value will be `None` unless you pass it in HTTPError or abort.

        If you want, you can rewrite the whole response body to anything you like:

        ```python
        @app.error_processor
        def my_error_processor(error):
            return {'error_detail': error.detail}, error.status_code, error.headers
        ```

        However, I would recommend keeping the `detail` in the response since it
        contains the detailed information about the validation error when the
        validation error happened.

        *Version changed: 0.7.0*

        - Support registering an async callback function.
        """
        if hasattr(self, 'ensure_sync'):  # pragma: no cover
            self.error_callback = self.ensure_sync(f)
        else:  # pragma: no cover
            self.error_callback = f
        return f

    def _register_openapi_blueprint(self) -> None:
        """Register a blueprint for OpenAPI support.

        The name of the blueprint is "openapi". This blueprint will hold the view
        functions for spec file, Swagger UI and Redoc.

        *Version changed: 0.7.0*

        - The format of the spec now rely on the `SPEC_FORMAT` config.
        """
        bp = Blueprint(
            'openapi',
            __name__,
            url_prefix=self.openapi_blueprint_url_prefix
        )

        if self.spec_path:
            @bp.route(self.spec_path)
            def spec() -> ResponseType:
                # Serve the spec as JSON or YAML depending on the SPEC_FORMAT config.
                if self.config['SPEC_FORMAT'] == 'json':
                    response = jsonify(self._get_spec('json'))
                    response.mimetype = self.config['JSON_SPEC_MIMETYPE']
                    return response
                return self._get_spec('yaml'), 200, \
                    {'Content-Type': self.config['YAML_SPEC_MIMETYPE']}

        if self.docs_path:
            @bp.route(self.docs_path)
            def swagger_ui() -> str:
                return render_template_string(
                    swagger_ui_template,
                    title=self.title,
                    version=self.version,
                    oauth2_redirect_path=self.docs_oauth2_redirect_path
                )

            if self.docs_oauth2_redirect_path:
                @bp.route(self.docs_oauth2_redirect_path)
                def swagger_ui_oauth_redirect() -> str:
                    return render_template_string(swagger_ui_oauth2_redirect_template)

        if self.redoc_path:
            @bp.route(self.redoc_path)
            def redoc() -> str:
                return render_template_string(
                    redoc_template,
                    title=self.title,
                    version=self.version
                )

        # only register the blueprint when at least one documentation path is enabled
        if self.enable_openapi and (
            self.spec_path or self.docs_path or self.redoc_path
        ):
            self.register_blueprint(bp)

    def _get_spec(
        self,
        spec_format: t.Optional[str] = None,
        force_update: bool = False
    ) -> t.Union[dict, str]:
        """Get the current OAS document file.

        This method will return the cached spec on the first call. If you want
        to get the latest spec, set the `force_update` to `True` or use the
        public attribute `app.spec`, which will always return the newly generated
        spec when you call it.

        If the config `SYNC_LOCAL_SPEC` is `True`, the local spec
        specified in config `LOCAL_SPEC_PATH` will be automatically updated
        when the spec changes.

        Arguments:
            spec_format: The format of the spec file, one of `'json'`, `'yaml'`
                and `'yml'`, defaults to the `SPEC_FORMAT` config.
            force_update: If ture, will generate the spec for every call instead
                of using the cache.

        *Version changed: 0.7.0*

        - The default format now rely on the `SPEC_FORMAT` config.
        - Support to sync local spec file.

        *Version changed: 0.7.1*

        - Rename the method name to `_get_spec`.
        - Add the `force_update` parameter.
        """
        if spec_format is None:
            spec_format = self.config['SPEC_FORMAT']
        if self._spec is None or force_update:
            if spec_format == 'json':
                self._spec = self._generate_spec().to_dict()
            else:
                self._spec = self._generate_spec().to_yaml()
            if self.spec_callback:
                # let a registered spec processor rewrite the generated spec
                self._spec = self.spec_callback(self._spec)  # type: ignore
        # sync local spec
        if self.config['SYNC_LOCAL_SPEC']:
            spec_path = self.config['LOCAL_SPEC_PATH']
            if spec_path is None:
                raise TypeError(
                    'The spec path (LOCAL_SPEC_PATH) should be a valid path string.'
                )
            spec: str
            if spec_format == 'json':
                spec = json.dumps(
                    self._spec, indent=self.config['LOCAL_SPEC_JSON_INDENT']
                )
            else:
                spec = str(self._spec)
            with open(spec_path, 'w') as f:
                f.write(spec)
        return self._spec  # type: ignore

    def spec_processor(self, f: SpecCallbackType) -> SpecCallbackType:
        """A decorator to register a spec handler callback function.

        You can register a function to update the spec. The callback function
        should accept the spec as an argument and return it in the end. The
        callback function will be called when generating the spec file.

        Examples:

        ```python
        @app.spec_processor
        def update_spec(spec):
            spec['info']['title'] = 'Updated Title'
            return spec
        ```

        Notice the format of the spec is depends on the the value of configuration
        variable `SPEC_FORMAT` (defaults to `'json'`):

        - `'json'` -> dict
        - `'yaml'` -> string

        *Version Changed: 0.7.0*

        - Support registering an async callback function.
        """
        if hasattr(self, 'ensure_sync'):  # pragma: no cover
            self.spec_callback = self.ensure_sync(f)
        else:  # pragma: no cover
            self.spec_callback = f
        return f

    @property
    def spec(self) -> t.Union[dict, str]:
        """Get the current OAS document file.

        This property will call `app._get_spec()` method and set the
        `force_update` parameter to `True`.

        *Version changed: 0.7.1*

        - Generate the spec on every call.
        """
        return self._get_spec(force_update=True)

    @staticmethod
    def _schema_name_resolver(schema: t.Type[Schema]) -> str:
        """Default schema name resovler."""
        name = schema.__class__.__name__
        if name.endswith('Schema'):
            # strip the 'Schema' suffix, but keep the bare name 'Schema' intact
            name = name[:-6] or name
        if schema.partial:
            # partial schemas are used for update payloads
            name += 'Update'
        return name

    def _make_info(self) -> dict:
        """Make OpenAPI info object."""
        info: dict
        if self.info:
            info = self.info
        else:
            info = {}
        if self.contact:
            info['contact'] = self.contact
        if self.license:
            info['license'] = self.license
        if self.terms_of_service:
            info['termsOfService'] = self.terms_of_service
        if self.description:
            info['description'] = self.description
        return info

    def _make_tags(self) -> t.List[t.Dict[str, t.Any]]:
        """Make OpenAPI tags object."""
        tags: t.Optional[TagsType] = self.tags
        if tags is not None:
            # convert simple tags list into standard OpenAPI tags
            if isinstance(tags[0], str):
                for index, tag_name in enumerate(tags):
                    tags[index] = {'name': tag_name}  # type: ignore
        else:
            tags: t.List[t.Dict[str, t.Any]] = []  # type: ignore
            if self.config['AUTO_TAGS']:
                # auto-generate tags from blueprints
                for blueprint_name, blueprint in self.blueprints.items():
                    if blueprint_name == 'openapi'
or \ not hasattr(blueprint, 'enable_openapi') or \ not blueprint.enable_openapi: # type: ignore continue tag: t.Dict[str, t.Any] = get_tag(blueprint, blueprint_name) # type: ignore tags.append(tag) # type: ignore return tags # type: ignore def _generate_spec(self) -> APISpec: """Generate the
of Simics internal events.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="977") # # -------------------- break, tbreak -------------------- # def do_break(object, t, address, length, r, w, x, temp): if length < 1: print "The breakpoint length must be >= 1 bytes." return access = 0 mode = "" if r: access = access | Sim_Access_Read; mode = mode + "r" if w: access = access | Sim_Access_Write; mode = mode + "w" if x or access == 0: access = access | Sim_Access_Execute mode = mode + "x" id = SIM_breakpoint(object, t, access, address, length, temp) if temp: pr("Temporary breakpoint ") else: pr("Breakpoint ") pr(`id` + " set on address " + number_str(address, 16)) if length > 1: pr(", length " + number_str(length, 10)) pr(" with access mode '" + mode + "'\n") bp_list = conf.sim.breakpoints[:] for i in range(len(bp_list)): obj = bp_list[i][11] if obj == object: try: r = SIM_get_attribute_idx(obj, "breakpoints", i+1) if i+1 != id and address <= r[2] and address+length-1 >= r[1]: print "Note: overlaps with breakpoint", r[0] except: pass return (id,) def break_cmd(object, address, len, r, w, x): break_type = Sim_Break_Physical if object.classname == "context": break_type = Sim_Break_Virtual return do_break(object, break_type, address, len, r, w, x, 0) new_command("break", break_cmd, [arg(uint64_t, "address"), arg(uint64_t, "length", "?", 1), arg(flag_t, "-r"), arg(flag_t, "-w"), arg(flag_t, "-x")], namespace = "breakpoint", type = ["Breakpoints", "Debugging"], short="set breakpoint", see_also = ["unbreak", "delete", "enable", "ignore", "set-prefix", "set-substr", "set-pattern", "list-breakpoints"], doc = """ Add breakpoint (read, write, or execute) on an object implementing the breakpoint interface. This is typically a memory space object such as physical memory; e.g., <cmd>phys_mem0.break 0xff3800</cmd>. Accesses intersecting the given range will trigger the breakpoint. 
By default the breakpoint will only trigger for instruction execution, but any subset of read, write, and execute accesses can be set to trigger using combinations of <arg>-r</arg>, <arg>-w</arg>, and <arg>-x</arg>. <arg>length</arg> is the interval length in bytes (default is 1). Breakpoints inserted with the <cmd>tbreak</cmd> command are automatically disabled when they have triggered. The default action at a triggered breakpoint is to return to the frontend. This can be changed by using haps. When an execution breakpoint is triggered, Simics will return to the command prompt before the instructions is executed, while instructions triggering read or write breakpoints will complete before control is returned to the command prompt. To break on a virtual address, use a context object: <cmd>primary_context.break 0x1ff00</cmd> Several breakpoints can be set on the same address and Simics will break on them in turn. If hap handlers (callback functions) are connected to the breakpoints they will also be executed in turn. Hap handlers are called before the access is performed, allowing the user to read a memory value that may be overwritten by the access. See the Simics Reference Manual for a description of hap handlers. Each breakpoint is associated with an id (printed when the breakpoint is set or by the <cmd>list-breakpoints</cmd> command) which is used for further references to the breakpoint. For convenience there are also a <cmd>break</cmd> command which sets a breakpoint on memory connected to the current frontend CPU (see <cmd>pselect</cmd>). Default is to break on virtual address accesses (in the current context). By prefixing the address with <arg>p:</arg> it is possible to break on physical accesses as well (cf. <cmd>phys_mem0.break</cmd>); e.g., <cmd>break p:0xffc0</cmd>. Several attributes can be set for a breakpoint for breaking only when some conditions are true. 
See the <cmd>disable</cmd>, <cmd>enable</cmd>, <cmd>ignore</cmd>, <cmd>set-prefix</cmd>, <cmd>set-substr</cmd> and <cmd>set-pattern</cmd> commands for more details. Breakpoints can be removed using <cmd>delete</cmd>.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1043") def tbreak_cmd(object, address, len, r, w, x): break_type = Sim_Break_Physical if object.classname == "context": break_type = Sim_Break_Virtual else: break_type = Sim_Break_Physical return do_break(object, break_type, address, len, r, w, x, 1) new_command("tbreak", tbreak_cmd, [arg(int_t, "address"), arg(int_t, "length", "?", 1), arg(flag_t, "-r"), arg(flag_t, "-w"), arg(flag_t, "-x")], namespace = "breakpoint", short="set temporary breakpoint on current processor", doc_with = "<breakpoint>.break", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1112") def classic_break_cmd(address, len, r, w, x): if address[0] == "p": obj = current_processor().physical_memory kind = Sim_Break_Physical else: obj = current_processor().current_context if address[0] == "l": kind = Sim_Break_Linear else: kind = Sim_Break_Virtual return do_break(obj, kind, address[1], len, r, w, x, 0) new_command("break", classic_break_cmd, [arg(addr_t, "address"), arg(uint64_t, "length", "?", 1), arg(flag_t, "-r"), arg(flag_t, "-w"), arg(flag_t, "-x")], alias = "b", short="set breakpoint on current processor", doc_with = "<breakpoint>.break", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1131") # # -------------------- unbreak -------------------- # _removed_breakpoint = 0 def _remove_breakpoint(id, address, length, access): bp = conf.sim.breakpoints[id] if not bp or bp[6] & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private): return _removed_breakpoint = 1 try: SIM_breakpoint_remove(id, access, address, length) except SimExc_General, msg: print msg def unbreak_cmd(poly, address, length, r, w, x): global _removed_breakpoint _removed_breakpoint = 0 access = 0 if r: access = 
access | Sim_Access_Read
    if w:
        access = access | Sim_Access_Write
    if x or access == 0:
        # Default to an execute breakpoint when no access flag is given.
        access = access | Sim_Access_Execute
    if poly[0] == int_t:
        # A specific breakpoint id was given.
        id = poly[1]
        bp = conf.sim.breakpoints[id]
        if not bp or bp[6] & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private):
            print "Cannot change simulation internal breakpoints."
            return
        _remove_breakpoint(id, address, length, access)
    else:
        # -all flag: apply to every breakpoint; iterate over a copy since
        # _remove_breakpoint may delete entries from the live list.
        for bp in conf.sim.breakpoints[:]:
            _remove_breakpoint(bp[0], address, length, access)

new_command("unbreak", unbreak_cmd,
            [arg((int_t,flag_t), ("id","-all")), arg(int_t, "address"),
             arg(int_t, "length"),
             arg(flag_t, "-r"),arg(flag_t, "-w"), arg(flag_t, "-x")],
            type = ["Breakpoints", "Debugging"],
            short = "remove breakpoint range",
            see_also = ['<breakpoint>.break', 'delete'],
            doc = """
Removes an address range from a breakpoint, splitting the breakpoint if
necessary. <arg>-r</arg> (read), <arg>-w</arg> (write) and <arg>-x</arg>
(execute) specify the type of breakpoint that should be removed in the given
address range. It defaults to <em>execute</em> if no flag is given.
<arg>id</arg> is the id number of the breakpoint to operate on. To operate on
all breakpoints at once, use the <arg>-all</arg> flag.
<cmd>list-breakpoints</cmd> prints all breakpoints' id.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1177")

#
# -------------------- delete --------------------
#

# get breakpoint id from integer or flag
def get_id(poly):
    # poly is an (arg-type, value) pair from the command parser; the -all
    # flag maps to id 0, which the SIM_* calls treat as "all breakpoints".
    if poly[0] == flag_t:
        id = 0l
    else:
        id = poly[1]
    return id

def delete_cmd(poly):
    # Delete one breakpoint (by id) or all of them (id 0 from -all).
    id = get_id(poly)
    if id:
        bp = conf.sim.breakpoints[id]
        if bp and bp[6] & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private):
            print "Cannot remove simulation internal breakpoints."
return try: SIM_delete_breakpoint(id) # 0 deletes all except Exception, msg: print msg new_command("delete", delete_cmd, [arg((flag_t,int_t), ("-all", "id"))], type = ["Breakpoints", "Debugging"], short = "remove a breakpoint", see_also = ['<breakpoint>.break', 'enable', 'ignore', 'set-prefix', 'set-substr', 'set-pattern', 'list-breakpoints'], doc = """ Removes a breakpoint. <i>id</i> is the id of the breakpoint to delete. Use <cmd>list-breakpoints</cmd> to list all breakpoints' id. If the flag <arg>-all</arg> is given, all breakpoints will be deleted. """, filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1219") # # -------------------- enable, disable -------------------- # def enable_disable_cmd(poly, val): id = get_id(poly) try: if id == 0: bps = conf.sim.breakpoints[:] for i in range(len(bps)): bps[i][5] = val conf.sim.breakpoints[bps[i][0]] = bps[i] else: bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id) bp[5] = val SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp) except Exception, msg: print msg def enable_cmd(poly): enable_disable_cmd(poly, 1) def disable_cmd(poly): enable_disable_cmd(poly, 0) new_command("enable", enable_cmd, [arg((flag_t,int_t), ("-all", "id"))], type = ["Breakpoints", "Debugging"], short = "enable/disable breakpoint", see_also = ['<breakpoint>.break', 'delete', 'ignore', 'list-breakpoints'], doc = """ Enable/disable instruction breakpoint. <i>id</i> is id number of the breakpoint to enable/disable. Use <b>list-breakpoints</b> to list breakpoint id:s. If '-all' is given all breakpoints will be enabled/disabled. 
Simics will not stop on a disabled breakpoint, however Simics will still count it.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1255") new_command("disable", disable_cmd, [arg((flag_t,int_t), ("-all", "id"))], short = "enable/disable breakpoint", doc_with = "enable", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1267") # # -------------------- ignore -------------------- # def ignore_cmd(id, num): try: bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id) # activate_at = hits + num + 1 bp[4] = bp[3] + num + 1 bp[5] = 0 SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp) except Exception, msg: print msg new_command("ignore", ignore_cmd, [arg(int_t, "id"), arg(int_t, "num")], type = ["Breakpoints", "Debugging"], short = "set ignore count for a breakpoint", see_also = ['enable', 'list-breakpoints'], doc = """ Sets the ignore count for a breakpoint. This means that the next <i>num</i> times the breakpoint is reached it will not trigger (hap handlers will not be called). To break next time set <i>num</i> to 0.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1286") # # -------------------- set-prefix -------------------- # def set_prefix_cmd(id, prefix): try: bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id) if not (bp[2] & 4): print "This can only be applied to execution breakpoints (access type x)." return bp[7] = prefix SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp) except Exception, msg: print msg new_command("set-prefix", set_prefix_cmd, [arg(int_t, "id"), arg(str_t, "prefix")], type = ["Breakpoints", "Debugging"], short = "set a syntax prefix for a breakpoint", doc_items = [('Note', 'Only supported for execution breakpoints.')], see_also = ['set-substr', 'set-pattern'], doc = """ Set a syntax prefix for a breakpoint. When set Simics will only break on instructions with a
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: iam short_description: Manage IAM users, groups, roles and keys description: - Allows for the management of IAM users, user API keys, groups, roles. version_added: "2.0" options: iam_type: description: - Type of IAM resource choices: ["user", "group", "role"] type: str name: description: - Name of IAM resource to create or identify required: true type: str new_name: description: - When state is update, will replace name with new_name on IAM resource type: str new_path: description: - When state is update, will replace the path with new_path on the IAM resource type: str state: description: - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. required: true choices: [ "present", "absent", "update" ] type: str path: description: - When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match. default: "/" type: str trust_policy: description: - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy_filepath). version_added: "2.2" type: dict trust_policy_filepath: description: - The path to the trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy). version_added: "2.2" type: str access_key_state: description: - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. 
choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"] type: str key_count: description: - When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1. default: 1 type: int access_key_ids: description: - A list of the keys that you want impacted by the access_key_state parameter. type: list groups: description: - A list of groups the user should belong to. When update, will gracefully remove groups not listed. type: list password: description: - When type is user and state is present, define the users login password. Also works with update. Note that always returns changed. type: str update_password: default: always choices: ['always', 'on_create'] description: - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. type: str notes: - 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' author: - "<NAME> (@defionscode)" - "<NAME> (@seiffert)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Basic user creation example tasks: - name: Create two new IAM users with API keys iam: iam_type: user name: "{{ item }}" state: present password: "{{ <PASSWORD> }}" access_key_state: create loop: - jcleese - mpython # Advanced example, create two new groups and add the pre-existing user # jdavila to both groups. 
task: - name: Create Two Groups, Mario and Luigi iam: iam_type: group name: "{{ item }}" state: present loop: - Mario - Luigi register: new_groups - name: iam: iam_type: user name: jdavila state: update groups: "{{ item.created_group.group_name }}" loop: "{{ new_groups.results }}" # Example of role with custom trust policy for Lambda service - name: Create IAM role with custom trust relationship iam: iam_type: role name: AAALambdaTestRole state: present trust_policy: Version: '2012-10-17' Statement: - Action: sts:AssumeRole Effect: Allow Principal: Service: lambda.amazonaws.com ''' RETURN = ''' role_result: description: the IAM.role dict returned by Boto type: str returned: if iam_type=role and state=present sample: { "arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role", "assume_role_policy_document": "...truncated...", "create_date": "2017-09-02T14:32:23Z", "path": "/", "role_id": "AROAA1B2C3D4E5F6G7H8I", "role_name": "my-new-role" } roles: description: a list containing the name of the currently defined roles type: list returned: if iam_type=role and state=present sample: [ "my-new-role", "my-existing-role-1", "my-existing-role-2", "my-existing-role-3", "my-existing-role-...", ] ''' import json import traceback try: import boto.exception import boto.iam import boto.iam.connection except ImportError: pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec, get_aws_connection_info) def _paginate(func, attr): ''' paginates the results from func by continuously passing in the returned marker if the results were truncated. this returns an iterator over the items in the returned response. `attr` is the name of the attribute to iterate over in the response. 
'''
    finished, marker = False, None
    while not finished:
        res = func(marker=marker)
        for item in getattr(res, attr):
            yield item
        # boto reports the truncation flag as the string 'true'/'false'.
        finished = res.is_truncated == 'false'
        if not finished:
            marker = res.marker


def list_all_groups(iam):
    # Return the names of all IAM groups, following pagination.
    return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]


def list_all_users(iam):
    # Return the names of all IAM users, following pagination.
    return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]


def list_all_roles(iam):
    # Return the names of all IAM roles, following pagination.
    return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]


def list_all_instance_profiles(iam):
    # Return the names of all IAM instance profiles, following pagination.
    return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]


def create_user(module, iam, name, pwd, path, key_state, key_count):
    # Create an IAM user, optionally setting a login password and creating
    # up to key_count access keys.  Fails the module on any boto error.
    key_qty = 0
    keys = []
    try:
        user_meta = iam.create_user(
            name, path).create_user_response.create_user_result.user
        changed = True
        if pwd is not None:
            pwd = iam.create_login_profile(name, pwd)
        if key_state in ['create']:
            if key_count:
                while key_count > key_qty:
                    keys.append(iam.create_access_key(
                        user_name=name).create_access_key_response.
                        create_access_key_result.
access_key) key_qty += 1 else: keys = None except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: user_info = dict(created_user=user_meta, password=<PASSWORD>, access_keys=keys) return (user_info, changed) def delete_dependencies_first(module, iam, name): changed = False # try to delete any keys try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) changed = True except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc()) # try to delete login profiles try: login_profile = iam.get_login_profiles(name).get_login_profile_response iam.delete_login_profile(name) changed = True except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg: module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc()) # try to detach policies try: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) changed = True except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'must detach all policies first' in error_msg: module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the policies " "through the console and try again." 
% name) module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc()) # try to deactivate associated MFA devices try: mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', []) for device in mfa_devices: iam.deactivate_mfa_device(name, device['serial_number']) changed = True except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc()) return changed def delete_user(module, iam, name): changed = delete_dependencies_first(module, iam, name) try: iam.delete_user(name) except boto.exception.BotoServerError as ex: module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc()) else: changed = True return name, changed def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated): changed = False name_change = False if updated and new_name: name = new_name try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] key_qty = len(current_keys) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found' in error_msg and updated: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] name = new_name else: module.fail_json(changed=False, msg=str(err)) updated_key_list = {} if new_name or new_path: c_path = iam.get_user(name).get_user_result.user['path'] if (name != new_name) or (c_path != new_path): changed = True try: if not updated: user = iam.update_user( 
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata else: user = iam.update_user( name, new_path=new_path).update_user_response.response_metadata user['updates'] = dict( old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) module.fail_json(changed=False, msg=str(err)) else: if not updated: name_change = True if pwd: try: iam.update_login_profile(name, pwd) changed = True except boto.exception.BotoServerError: try: iam.create_login_profile(name, pwd) changed = True except boto.exception.BotoServerError as err: error_msg = boto_exception(str(err)) if 'Password does not conform to the account password policy' in error_msg: module.fail_json(changed=False, msg="Password doesn't conform to policy") else: module.fail_json(msg=error_msg) try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] key_qty = len(current_keys) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found' in error_msg and updated: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] name
<reponame>gruunday/useradm # --------------------------------------------------------------------------- # # MODULE DESCRIPTION # # --------------------------------------------------------------------------- # """RedBrick User Database Module; contains RBUserDB class.""" import crypt import fcntl import math import os import random import re import sys import time import ldap import rbconfig from rberror import RBError, RBFatalError, RBWarningError from rbopt import RBOpt from rbuser import RBUser # --------------------------------------------------------------------------- # # DATA # # --------------------------------------------------------------------------- # __version__ = '$Revision: 1.10 $' __author__ = '<NAME>' # --------------------------------------------------------------------------- # # CLASSES # # --------------------------------------------------------------------------- # class RBUserDB: """Class to interface with user database.""" valid_shells = None backup_shells = None def __init__(self): """Create new RBUserDB object.""" self.opt = RBOpt() self.ldap = None self.ldap_dcu = None def connect(self, uri=rbconfig.LDAP_URI, dn=rbconfig.LDAP_ROOT_DN, password=None, dcu_uri=rbconfig.LDAP_DCU_URI, dcu_dn=rbconfig.LDAP_DCU_RBDN, dcu_pw=None): """Connect to databases. Custom URI, DN and password may be given for RedBrick LDAP. Password if not given will be read from shared secret file set in rbconfig. Custom URI may be given for DCU LDAP. """ if not password: try: pw_file = open(rbconfig.LDAP_ROOTPW_FILE, 'r') password = pw_file.readline().rstrip() except IOError: raise RBFatalError("Unable to open LDAP root password file") pw_file.close() if not dcu_pw: try: pw_file = open(rbconfig.LDAP_DCU_RBPW, 'r') dcu_pw = pw_file.readline().rstrip() except IOError: raise RBFatalError("Unable to open DCU AD root password file") pw_file.close() # Default protocol seems to be 2, set to 3. ldap.set_option(ldap.OPT_PROTOCOL_VERSION, 3) # Connect to RedBrick LDAP. 
        self.ldap = ldap.initialize(uri)
        self.ldap.simple_bind_s(dn, password)

        # Connect to DCU LDAP (anonymous bind).
        self.ldap_dcu = ldap.initialize(dcu_uri)
        # self.ldap_dcu.simple_bind_s('', '')
        self.ldap_dcu.simple_bind_s(dcu_dn, dcu_pw)

    def close(self):
        """Close database connections."""
        if self.ldap:
            self.ldap.unbind()
        if self.ldap_dcu:
            self.ldap_dcu.unbind()

    def setopt(self, opt):
        """Use given RBOpt object to retrieve options."""
        self.opt = opt

    # ------------------------------------------------------------------- #
    # USER CHECKING AND INFORMATION RETRIEVAL METHODS                     #
    # ------------------------------------------------------------------- #

    def check_userfree(self, uid):
        """Check if a username is free.

        If username is already used or is an LDAP group, an RBFatalError is
        raised. If the username is in the additional reserved LDAP tree, an
        RBWarningError is raised and checked if it is to be overridden.
        """
        # Taken by an existing account?
        res = self.ldap.search_s(rbconfig.ldap_accounts_tree,
                                 ldap.SCOPE_ONELEVEL, 'uid=%s' % uid)
        if res:
            raise RBFatalError(
                "Username '%s' is already taken by %s account (%s)" %
                (uid, res[0][1]['objectClass'][0].decode(),
                 res[0][1]['cn'][0].decode()))
        # Clashes with an LDAP group name?
        res = self.ldap.search_s(rbconfig.ldap_group_tree,
                                 ldap.SCOPE_ONELEVEL, 'cn=%s' % uid)
        if res:
            raise RBFatalError("Username '%s' is reserved (LDAP Group)" % uid)
        # Reserved names are only a warning; rberror() decides whether the
        # override option allows the caller to continue.
        res = self.ldap.search_s(rbconfig.ldap_reserved_tree,
                                 ldap.SCOPE_ONELEVEL, 'uid=%s' % uid)
        if res:
            self.rberror(
                RBWarningError("Username '%s' is reserved (%s)" % (uid, res[0][
                    1]['description'][0].decode())))

    def check_user_byname(self, uid):
        """Raise RBFatalError if given username does not exist in user database."""
        if not self.ldap.search_s(rbconfig.ldap_accounts_tree,
                                  ldap.SCOPE_ONELEVEL, 'uid=%s' % uid):
            raise RBFatalError("User '%s' does not exist" % uid)

    def check_user_byid(self, user_id):
        """Raise RBFatalError if given id does not belong to a user in user database."""
        if not self.ldap.search_s(rbconfig.ldap_accounts_tree,
                                  ldap.SCOPE_ONELEVEL, 'id=%s' % user_id):
raise RBFatalError("User with id '%s' does not exist" % user_id) def check_group_byname(self, group): """Raise RBFatalError if given group does not exist in group database.""" if not self.ldap.search_s(rbconfig.ldap_group_tree, ldap.SCOPE_ONELEVEL, 'cn=%s' % group): raise RBFatalError("Group '%s' does not exist" % group) def check_group_byid(self, gid): """Raise RBFatalError if given id does not belong to a group in group database.""" if not self.ldap.search_s(rbconfig.ldap_group_tree, ldap.SCOPE_ONELEVEL, 'gidNumber=%s' % gid): raise RBFatalError("Group with id '%s' does not exist" % gid) # ------------------------------------------------------------------- # # INFORMATION RETRIEVAL METHODS # # ------------------------------------------------------------------- # # fixme still needed ? # def get_usertype_byname(self, uid): # """Return usertype for username in user database. Raise # RBFatalError if user does not exist.""" # res = self.ldap.search_s(rbconfig.ldap_accounts_tree, # ldap.SCOPE_ONELEVEL, 'uid=%s' % usr.uid, # ('objectClass', )) # if res: # for i in res[0][1]['objectClass']: # if i in rbconfig.usertypes: # return i # else: # raise RBFatalError("Unknown usertype for user '%s'" % uid) # else: # raise RBFatalError("User '%s' does not exist" % uid) def get_user_byname(self, usr): """Populate RBUser object with data from user with given username in user database. Raise RBFatalError if user does not exist.""" res = self.ldap.search_s(rbconfig.ldap_accounts_tree, ldap.SCOPE_ONELEVEL, 'uid=%s' % usr.uid) if res: self.set_user(usr, res[0]) else: raise RBFatalError("User '%s' does not exist" % usr.uid) def get_user_byid(self, usr): """Populate RBUser object with data from user with given id in user database. 
Raise RBFatalError if user does not exist.""" res = self.ldap.search_s(rbconfig.ldap_accounts_tree, ldap.SCOPE_ONELEVEL, 'id=%s' % usr.id) if res: self.set_user(usr, res[0]) else: raise RBFatalError("User with id '%s' does not exist" % usr.id) def get_userinfo_new(self, usr, override=0): """Checks if ID already belongs to an existing user and if so raises RBFatalError. Populates RBUser object with data for new user from DCU databases otherwise raises RBWarningError.""" if usr.id is not None: tmpusr = RBUser(id=usr.id) try: self.get_user_byid(tmpusr) except RBError: pass else: raise RBFatalError("Id '%s' is already registered to %s (%s)" % (usr.id, tmpusr.uid, tmpusr.cn)) self.get_dcu_byid(usr, override) def get_userinfo_renew(self, usr, curusr=None, override=0): """Merge RBUser object with current data from DCU & user databases. Set curusr if given to current data from user database.""" # Load the user data currently in the database. # if not curusr: curusr = RBUser() curusr.uid = usr.uid curusr.id = usr.id if usr.uid: self.get_user_byname(curusr) else: self.get_user_byid(curusr) usr.usertype = usr.usertype or curusr.usertype usr.id = usr.id if usr.id is not None else curusr.id self.check_renewal_usertype(usr.usertype) if usr.usertype in rbconfig.usertypes_dcu: # Load the dcu data using usertype and ID set in the given usr # or failing that from the current user database. # dcuusr = RBUser(uid=usr.uid, usertype=usr.usertype, id=usr.id) try: self.get_dcu_byid(dcuusr, override=1) except RBError as err: self.rberror(err) # Any attributes not set in the given usr are taken from the # current dcu database or failing that, the current user # database. # # Exceptions to this are: # # - updatedby: caller must give this # - email: for associates as it may be changed from their DCU # address when they leave DCU so we don't want to # automatically overwrite it. 
# - usertype: if get_dcu_byid finds the dcu details, it'll set # the usertype as appropriate when override option is given, # so we automatically override this here too. # if usr.usertype == 'associat': dcuusr.altmail = None usr.merge(dcuusr, override=override) usr.merge(RBUser(curusr, updatedby=None)) @classmethod def get_userdefaults_new(cls, usr): """Populate RBUser object with default values for a new user. Usertype should be provided, but a default of member will be assumed.""" if not usr.usertype: usr.usertype = 'member' if usr.newbie is None: usr.newbie = 1 if (usr.yearsPaid is None and (usr.usertype in rbconfig.usertypes_paying) and usr.usertype not in ('committe', 'guest')): usr.yearsPaid = 1 @classmethod def get_userdefaults_renew(cls, usr): """Populate RBUser object with some reasonable default values for renewal user""" if usr.usertype in rbconfig.usertypes_paying: if usr.yearsPaid is None or usr.yearsPaid < 1: usr.yearsPaid = 1 def get_dcu_byid(self, usr, override=0): """Populates RBUser object with data for new user from appropriate DCU database for the given usertype. If usertype is not given, all DCU databases are tried and the usertype is determined from which database has the given ID. If no data for ID, raise RBWarningError.""" # Just try all databases for a match regardless if # usertype was given or not. If usertype wasn't set or the # override option is given, set the usertype to the # corresponding database that had the ID. # usertype = None try: self.get_staff_byid(usr, override) except RBError: try: self.get_alumni_byid(usr, override) except RBError as err: try: self.get_student_byid(usr, override) except RBError as err: if usr.usertype not in ('associat', 'staff'): self.rberror(err) else: usertype = 'member' else: usertype = 'associat' else: usertype = 'staff' # fixme: this overrides committe people (typically back to member) # which probably shouldn't be done? 
if usertype and (override or not usr.usertype): usr.usertype = usertype return # Graduates now remain in the (currently student, but may # change) LDAP tree for their life long email accounts so try # to load in information for associates (but don't fail if we # can't). # # if usr.usertype in ('member', 'associat', 'committe'): # try: # self.get_student_byid(usr, override) # except RBError, e: # if usr.usertype != 'associat': # self.rberror(e) # # Not all staff may be in the LDAP tree, so don't fail if we # # can't get their information either. # # # elif usr.usertype == 'staff': # try: # self.get_staff_byid(usr, override) # except RBError: # pass def get_student_byid(self, usr, override=0): """Populate
self.boton[3].setStyleSheet("background-color: rgb(239, 172, 122);") def txMargen(self): self.boton[4].setStyleSheet("background-color: rgb(239, 172, 122);") def Graficar(self): self.figura.chart().removeAllSeries() if self.motor.redOk: GSuperficie(self) GPatrones(self) if self.motor.redOk: GRed(self, self.motor.laRed) GAxes(self) self.InfoRed() def InfoIni(self): self.Graficar() # poner informacion sobre los patrones self.display[0].setText("PatronesE: " + str(self.motor.numPEVT[0])) dim = self.motor.patrones.shape[1] - 1 self.display[1].setText("Entradas: " + str(dim)) cla = int(self.motor.patrones[:, dim].max() + 1) self.display[2].setText("Clases: " + str(cla)) def InfoRed(self): # poner informacion sobre la red if self.motor.redOk: act = np.where(self.motor.laRed.actK, 1, 0).sum() self.display[3].setText("Dendritas: " + str(act)) self.display[4].setText("Inhibidas: " + str(self.motor.laRed.actK.size - act)) self.motor.laRed.errorCM(self.motor.patrones[0:self.motor.numPEVT[0], :]) self.display[5].setText("ECM: " + str(round(self.motor.laRed.error, 6))) def BloquearCosas(self, block): if block: self.estadoH = [] for i in range(len(self.boton)): self.estadoH.append(self.boton[i].isEnabled()) self.boton[i].setEnabled(False) for i in range(len(self.escribe)): self.estadoH.append(self.escribe[i].isEnabled()) self.escribe[i].setEnabled(False) self.estadoH.append(self.selector.isEnabled()) self.selector.setEnabled(False) self.estadoH.append(self.unirDyC.isEnabled()) self.unirDyC.setEnabled(False) else: n = 0 for i in range(len(self.boton)): self.boton[i].setEnabled(self.estadoH[n]) n += 1 for i in range(len(self.escribe)): self.escribe[i].setEnabled(self.estadoH[n]) n += 1 self.selector.setEnabled(self.estadoH[n]) self.unirDyC.setEnabled(self.estadoH[n + 1]) def FinHilo(self): QMessageBox.about(self, "Éxito", "Éxito:\n" "inicialización ejecutada correctamente") self.Graficar() class HiloProcesos2(QThread): def __init__(self): QThread.__init__(self) self.origen = None 
self.esKmedias = True def run(self): elId = self.origen elId.vivo.setVisible(True) elId.BloquearCosas(True) if self.esKmedias: clu = int(elId.escribe[0].text() if elId.escribe[0].text() != "" else "0") dim = float(elId.escribe[1].text() if elId.escribe[1].text() != "." else "0.0") dim = (100.0 if dim == 99.99 else dim) dim = (5.0 if dim < 5.0 else dim) / 100.0 elId.motor.laRed.KmediasItera(elId.motor.patrones[0:elId.motor.numPEVT[0], :], clu, dim) else: mar = float(elId.escribe[2].text() if elId.escribe[2].text() != "." else "0.0") mar = (100.0 if mar == 99.99 else mar) / 100.0 elId.motor.laRed.DyC(elId.motor.patrones[0:elId.motor.numPEVT[0], :], mar, elId.unirDyC.isChecked()) time.sleep(1) elId.BloquearCosas(False) elId.vivo.setVisible(False) class Entreno(QWidget): def __init__(self, tipo): QWidget.__init__(self) # que clase de entreno es: 0:SGD, 1:DE, 2:PSO self.tipo = tipo if tipo == 0: self.setWindowTitle("SoftwareDMNN-SGD") self.setObjectName("gradiente") elif tipo == 1: self.setWindowTitle("SoftwareDMNN-DE") self.setObjectName("evolutivo") elif tipo == 2: self.setWindowTitle("SoftwareDMNN-PSO") self.setObjectName("particulas") self.setWindowIcon(QIcon("img12.png")) # id de GUI del menu principal, para volver self.menu = None # array que guardara la id de las otras GUIs self.modulo = None # id de la clase maestra del softare self.motor = None # guarda el estado actual de la GUI: 0:stop, 1:pausa, 2:run self.estado = 0 # clase que ejecuta los algoritmos importantes en hilos self.elHilo = HiloProcesos1() self.elHilo.origen = self self.elHilo.finished.connect(self.FinHilo) # dice si esta disponible para pintar self.libre = True # definir los botones de la GUI con su nombre e indice self.boton = [] self.boton.append(QPushButton(QIcon("img13.png"), "Menú")) self.boton.append(QPushButton(QIcon("img28.png"), "")) self.boton.append(QPushButton(QIcon("img29.png"), "")) self.boton.append(QPushButton(QIcon("img30.png"), "")) 
self.boton.append(QPushButton(QIcon("img36.png"), "Por Defecto")) # activar las ayudas de texto para los botones self.boton[0].setToolTip(MyToolTip(9)) self.boton[1].setToolTip(MyToolTip(29)) self.boton[2].setToolTip(MyToolTip(30)) self.boton[3].setToolTip(MyToolTip(31)) self.boton[4].setToolTip(MyToolTip(21)) # poner nombres a los botones para el estilo self.boton[0].setObjectName("m_menu") for i in range(1, len(self.boton)): if tipo == 0: self.boton[i].setObjectName("b_gradiente") elif tipo == 1: self.boton[i].setObjectName("b_evolutivo") elif tipo == 2: self.boton[i].setObjectName("b_particulas") # conectar los botones a su funcion correspondiente self.boton[0].clicked.connect(self.toMenu) self.boton[1].clicked.connect(self.Play) self.boton[2].clicked.connect(self.Pause) self.boton[3].clicked.connect(self.Stop) self.boton[4].clicked.connect(self.Defecto) # definir el selector de grafica self.selector = QComboBox() self.selector.insertItem(0, " Patrones (entrenamiento veloz, sin refresco)") self.selector.insertItem(1, " Patrones + Cajas") self.selector.insertItem(2, " Patrones + Superficie") self.selector.insertItem(3, " Patrones + Cajas + Superficie") self.selector.insertItem(4, " Entrenamiento: roj:eE, azu:eV, mag:eT, ver:Den") if tipo == 1: self.selector.insertItem(5, " Genes (vista de muestra)") elif tipo == 2: self.selector.insertItem(5, " Partículas (vista de muestra)") self.selector.setCurrentIndex(4) self.selector.activated.connect(self.cambioSelect) # definir el check para DE o SGD if tipo == 1: self.ordenamiento = QComboBox() self.ordenamiento.insertItem(0, "Seleccionar Padre vs Hijo") self.ordenamiento.insertItem(1, "Seleccionar Par Global Azaroso") self.ordenamiento.setCurrentIndex(0) self.ordenamiento.setToolTip(MyToolTip(32)) else: self.ordenamiento = None # definir las cajas de escritura self.escribe = [] self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) if tipo == 0: 
self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) # inutil self.escribe.append(QLineEdit("")) elif tipo == 1: self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) # inutil self.escribe.append(QLineEdit("")) elif tipo == 2: self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) self.escribe.append(QLineEdit("")) # activar las ayudas de texto para las cajas de escritura self.escribe[0].setToolTip(MyToolTip(34)) self.escribe[1].setToolTip(MyToolTip(35)) if tipo == 0: self.escribe[2].setToolTip(MyToolTip(36)) self.escribe[3].setToolTip(MyToolTip(39)) self.escribe[4].setToolTip(MyToolTip(40)) self.escribe[5].setToolTip(MyToolTip(41)) elif tipo == 1: self.escribe[2].setToolTip(MyToolTip(37)) self.escribe[3].setToolTip(MyToolTip(42)) self.escribe[4].setToolTip(MyToolTip(43)) self.escribe[5].setToolTip(MyToolTip(44)) elif tipo == 2: self.escribe[2].setToolTip(MyToolTip(38)) self.escribe[3].setToolTip(MyToolTip(45)) self.escribe[4].setToolTip(MyToolTip(46)) self.escribe[5].setToolTip(MyToolTip(47)) self.escribe[6].setToolTip(MyToolTip(48)) self.escribe[7].setToolTip(MyToolTip(49)) # modificar propiedades de las cajas de escritura for i in range(len(self.escribe)): self.escribe[i].setAlignment(Qt.AlignCenter) self.escribe[i].setFixedWidth(150) # limitar la longitud de los textos y su formato self.escribe[0].setInputMask("000000000;") self.escribe[1].setInputMask("00.00;") self.escribe[2].setInputMask("00.00;") if tipo == 0: self.escribe[3].setInputMask("0000000;") self.escribe[4].setInputMask("0.000;") self.escribe[5].setInputMask("0.000;") elif tipo == 1: self.escribe[3].setInputMask("0000;") self.escribe[4].setInputMask("0.000;") self.escribe[5].setInputMask("00.00;") elif tipo == 2: self.escribe[3].setInputMask("0000;") self.escribe[4].setInputMask("0.000;") 
self.escribe[5].setInputMask("0.000;") self.escribe[6].setInputMask("0.000;") self.escribe[7].setInputMask("0.000000;") # definir los textos que seran cambiados con codigo self.display = [] self.display.append(QLabel(" ")) self.display.append(QLabel("Dendr:")) self.display.append(QLabel("ERL:")) self.display.append(QLabel("ECM:")) # modificar propiedades de los textos cambiantes for i in range(len(self.display)): self.display[i].setAlignment(Qt.AlignLeft) self.display[2].setToolTip(MyToolTip(58)) # definir la barra de progreso para el entreno self.progreso = QProgressBar() self.progreso.setMaximum(100) if tipo == 0: self.progreso.setObjectName("v_gradiente") elif tipo == 1: self.progreso.setObjectName("v_evolutivo") elif tipo == 2: self.progreso.setObjectName("v_particulas") self.progreso.setAlignment(Qt.AlignRight) # crear el contenedor principal rejilla = QGridLayout() # poner el escalado por defecto de la rejilla, vertical rejilla.setRowStretch(0, 1) rejilla.setRowStretch(1, 100) rejilla.setRowStretch(2, 1) # poner el escalado por defecto de la rejilla, horizontal rejilla.setColumnStretch(0, 1) rejilla.setColumnStretch(1, 100) rejilla.setColumnStretch(2, 1) rejilla.setColumnStretch(3, 25) rejilla.setColumnStretch(4, 1) # crear los dos contenedores secundarios zizq = QVBoxLayout() zder = QVBoxLayout() # agregar selector de grafica zizq.addWidget(self.selector) # agregar la grafica al panel izquierdo self.figura = QChartView() self.figura.chart().setDropShadowEnabled(False) self.figura.chart().setMargins(QMargins(0, 0, 0, 0)) zizq.addWidget(self.figura) # agregar los textos cambiantes subhori = QHBoxLayout() subhori.addWidget(self.display[1]) subhori.addWidget(self.display[3]) subhori.addWidget(self.display[2]) zizq.addLayout(subhori) # agregar la barra de progreso de entrenamiento subhori = QHBoxLayout() subhori.addWidget(self.progreso) subhori.addWidget(self.display[0]) zizq.addLayout(subhori) # agregar el titulo del submenu y el boton de volver al menu subhori 
= QHBoxLayout() self.boton[0].setFixedWidth(150) self.boton[4].setFixedWidth(150) subsubv = QVBoxLayout() subsubv.addWidget(self.boton[0]) subsubv.addWidget(self.boton[4]) subhori.addLayout(subsubv) if tipo == 0: xtt = QLabel("Gradiente\nDescendente\nEstocástico") xtt.setStyleSheet("background-color: rgb(255,220,215);") elif tipo == 1: xtt = QLabel("Evolución\nDiferencial") xtt.setStyleSheet("background-color: rgb(209,255,207);") elif tipo == 2: xtt = QLabel("Optimización\npor Enjambre\nde Partículas") xtt.setStyleSheet("background-color: rgb(211,207,255);") xtt.setAlignment(Qt.AlignCenter) xtt.setObjectName("subtitulo") subhori.addWidget(xtt) zder.addLayout(subhori) # comprimir lo de la derecha hacia abajo zder.addStretch(1) # agregar packete pack = QGroupBox("Parámetros de Escalabilidad") subpack = QVBoxLayout() subpack.addSpacing(10) # # agregar el par escribe/texto subhori = QHBoxLayout() subhori.addWidget(self.escribe[3]) if tipo == 0: xtt = QLabel("Mini-Bache") elif tipo == 1: xtt = QLabel("Dimensión de Población") elif tipo == 2: xtt = QLabel("Número de Partículas") xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # # agregar el check para DE o SGD if tipo == 1: subpack.addWidget(self.ordenamiento) # pack.setLayout(subpack) zder.addWidget(pack) # agregar packete pack = QGroupBox("Parámetros del Algoritmo") subpack = QVBoxLayout() subpack.addSpacing(10) # # agregar el par escribe/texto subhori = QHBoxLayout() subhori.addWidget(self.escribe[4]) if tipo == 0: xtt = QLabel("Paso alfa") elif tipo == 1: xtt = QLabel("h Escala de Mutación") elif tipo == 2: xtt = QLabel("c1 Memoria Local") xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # # agregar el par escribe/texto subhori = QHBoxLayout() subhori.addWidget(self.escribe[5]) if tipo == 0: xtt = QLabel("Fricción beta") elif tipo == 1: xtt = QLabel("c % Recombinación") elif tipo == 2: xtt = QLabel("c2 Memoria Global") 
xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # # agregar el par escribe/texto if tipo == 2: subhori = QHBoxLayout() subhori.addWidget(self.escribe[6]) xtt = QLabel("c3 Fricción") xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # # agregar el par escribe/texto subhori = QHBoxLayout() subhori.addWidget(self.escribe[2]) xtt = QLabel("Factor Amortiguamiento") xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # pack.setLayout(subpack) zder.addWidget(pack) # agregar packete pack = QGroupBox("Disminuir Dendritas") subpack = QVBoxLayout() subpack.addSpacing(10) # # agregar el par escribe/texto subhori = QHBoxLayout() subhori.addWidget(self.escribe[1]) xtt = QLabel("% Probabilidad Inhibir") xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # subhori = QHBoxLayout() subhori.addWidget(self.escribe[7]) xtt = QLabel("Tolerancia, ECM Máximo") xtt.setAlignment(Qt.AlignCenter) subhori.addWidget(xtt) subpack.addLayout(subhori) # pack.setLayout(subpack) zder.addWidget(pack) # agregar packete pack = QGroupBox("Ejecución") subpack = QVBoxLayout() subpack.addSpacing(10) # # agregar linea y botones de entreno subhori = QHBoxLayout() subhori.addWidget(self.escribe[0]) subhori.addWidget(self.boton[1]) subhori.addWidget(self.boton[2]) subhori.addWidget(self.boton[3]) subpack.addLayout(subhori) # pack.setLayout(subpack) zder.addWidget(pack) # comprimir lo de la derecha hacia arriba zder.addStretch(1) # poner los contenedores secundarios en el principal rejilla.addLayout(zizq, 1, 1) rejilla.addLayout(zder, 1, 3) # poner en las esquinas de la rejilla el espaciador invisible pix = QPixmap("img11.png") xtt = QLabel() xtt.setPixmap(pix) rejilla.addWidget(xtt, 0, 0) xtt = QLabel() xtt.setPixmap(pix) rejilla.addWidget(xtt, 2, 0) self.vivo = QLabel() gif = QMovie("wait.gif") self.vivo.setMovie(gif) gif.start() self.vivo.setVisible(False) 
rejilla.addWidget(self.vivo, 0, 2) xtt = QLabel() xtt.setPixmap(pix) rejilla.addWidget(xtt, 2, 2) xtt = QLabel() xtt.setPixmap(pix) rejilla.addWidget(xtt, 0, 4) xtt = QLabel() xtt.setPixmap(pix) rejilla.addWidget(xtt, 2, 4) # agregar el contenedor principal a la ventana self.setLayout(rejilla) self.Defecto() def toMenu(self): self.selector.setCurrentIndex(4) self.menu.show() self.menu.setGeometry(self.geometry()) FondoPSI(self.menu) self.hide() def resizeEvent(self, size): FondoPSI(self) def Play(self): self.estado = 2 if self.selector.currentIndex() == 2 or self.selector.currentIndex() == 3: self.selector.setCurrentIndex(1) self.elHilo.start() def Pause(self): self.estado = 1 def Stop(self):
str """ if self.use_phase_model: return r"Time [MJD]" else: return r"Time since burst [days]" @property def ylabel(self) -> str: """ :return: ylabel used in plotting functions :rtype: str """ try: return self.ylabel_dict[self.data_mode] except KeyError: raise ValueError("No data mode specified") def set_bands_and_frequency( self, bands: Union[None, list, np.ndarray], frequency: Union[None, list, np.ndarray]): """Sets bands and frequencies at the same time to keep the logic consistent. If both are given use those values. If only frequencies are given, use them also as band names. If only bands are given, try to convert them to frequencies. :param bands: The bands, e.g. ['g', 'i']. :type bands: Union[None, list, np.ndarray] :param frequency: The frequencies associated with the bands. :type frequency: Union[None, list, np.ndarray] """ if (bands is None and frequency is None) or (bands is not None and frequency is not None): self._bands = bands self._frequency = frequency elif bands is None and frequency is not None: self._frequency = frequency self._bands = self.frequency elif bands is not None and frequency is None: self._bands = bands self._frequency = self.bands_to_frequency(self.bands) @property def frequency(self) -> np.ndarray: """ :return: Used band frequencies :rtype: np.ndarray """ return self._frequency @frequency.setter def frequency(self, frequency: np.ndarray) -> None: """ :param frequency: Set band frequencies if an array is given. Otherwise, convert bands to frequencies. :type frequency: np.ndarray """ self.set_bands_and_frequency(bands=self.bands, frequency=frequency) @property def bands(self) -> Union[list, None, np.ndarray]: return self._bands @bands.setter def bands(self, bands: Union[list, None, np.ndarray]): self.set_bands_and_frequency(bands=bands, frequency=self.frequency) @property def filtered_frequencies(self) -> np.array: """ :return: The frequencies only associated with the active bands. 
:rtype: np.ndarray """ return self.frequency[self.filtered_indices] @property def active_bands(self) -> list: """ :return: List of active bands used. :rtype list: """ return self._active_bands @active_bands.setter def active_bands(self, active_bands: Union[list, str, None]) -> None: """ :param active_bands: Sets active bands based on list given. If argument is 'all', all unique bands in `self.bands` will be used. :type active_bands: Union[list, str] """ if str(active_bands) == 'all': self._active_bands = list(np.unique(self.bands)) else: self._active_bands = active_bands @property def filtered_indices(self) -> Union[list, None]: """ :return: The list indices in `bands` associated with the active bands. :rtype: Union[list, None] """ if self.bands is None: return list(np.arange(len(self.x))) return [b in self.active_bands for b in self.bands] def get_filtered_data(self) -> tuple: """Used to filter flux density and photometry data, so we only use data that is using the active bands. :return: A tuple with the filtered data. Format is (x, x_err, y, y_err) :rtype: tuple """ if self.flux_density_data or self.magnitude_data: filtered_x = self.x[self.filtered_indices] try: filtered_x_err = self.x_err[self.filtered_indices] except (IndexError, TypeError): filtered_x_err = None filtered_y = self.y[self.filtered_indices] filtered_y_err = self.y_err[self.filtered_indices] return filtered_x, filtered_x_err, filtered_y, filtered_y_err else: raise ValueError(f"Transient needs to be in flux density or magnitude data mode, " f"but is in {self.data_mode} instead.") @property def unique_bands(self) -> np.ndarray: """ :return: All bands that we get from the data, eliminating all duplicates. :rtype: np.ndarray """ return np.unique(self.bands) @property def unique_frequencies(self) -> np.ndarray: """ :return: All frequencies that we get from the data, eliminating all duplicates. 
:rtype: np.ndarray """ try: if isinstance(self.unique_bands[0], (float, int)): return self.unique_bands except (TypeError, IndexError): pass return self.bands_to_frequency(self.unique_bands) @property def list_of_band_indices(self) -> list: """ :return: Indices that map between bands in the data and the unique bands we obtain. :rtype: list """ return [np.where(self.bands == np.array(b))[0] for b in self.unique_bands] @property def default_filters(self) -> list: """ :return: Default list of filters to use. :rtype: list """ return ["g", "r", "i", "z", "y", "J", "H", "K"] @staticmethod def get_colors(filters: Union[np.ndarray, list]) -> matplotlib.colors.Colormap: """ :param filters: Array of list of filters to use in the plot. :type filters: Union[np.ndarray, list] :return: Colormap with one color for each filter. :rtype: matplotlib.colors.Colormap """ return matplotlib.cm.rainbow(np.linspace(0, 1, len(filters))) def plot_data(self, axes: matplotlib.axes.Axes = None, filename: str = None, outdir: str = None, save: bool = True, show: bool = True, plot_others: bool = True, color: str = 'k', **kwargs) -> matplotlib.axes.Axes: """Plots the Transient data and returns Axes. :param axes: Matplotlib axes to plot the lightcurve into. Useful for user specific modifications to the plot. :param filename: Name of the file to be plotted in. :param outdir: The directory in which to save the file in. :param save: Whether to save the plot. (Default value = True) :param show: Whether to show the plot. (Default value = True) :param plot_others: Whether to plot inactive bands. (Default value = True) :param color: Color of the data. :param kwargs: Additional keyword arguments to pass in the Plotter methods. Available in the online documentation under at `redback.plotting.Plotter`. `print(Transient.plot_data.__doc__)` to see all options! :return: The axes with the plot. 
""" if self.flux_data: plotter = IntegratedFluxPlotter(transient=self, color=color, filename=filename, outdir=outdir, **kwargs) elif self.luminosity_data: plotter = LuminosityPlotter(transient=self, color=color, filename=filename, outdir=outdir, **kwargs) elif self.flux_density_data: plotter = FluxDensityPlotter(transient=self, color=color, filename=filename, outdir=outdir, plot_others=plot_others, **kwargs) elif self.magnitude_data: plotter = MagnitudePlotter(transient=self, color=color, filename=filename, outdir=outdir, plot_others=plot_others, **kwargs) else: return axes return plotter.plot_data(axes=axes, save=save, show=show) def plot_multiband( self, figure: matplotlib.figure.Figure = None, axes: matplotlib.axes.Axes = None, filename: str = None, outdir: str = None, ncols: int = 2, save: bool = True, show: bool = True, nrows: int = None, figsize: tuple = None, filters: list = None, **kwargs: None) \ -> matplotlib.axes.Axes: """ :param figure: Figure can be given if defaults are not satisfying :param axes: Axes can be given if defaults are not satisfying :param filename: Name of the file to be plotted in. :param outdir: The directory in which to save the file in. :param save: Whether to save the plot. (Default value = True) :param show: Whether to show the plot. (Default value = True) :param ncols: Number of columns to use on the plot. Default is 2. :param nrows: Number of rows to use on the plot. If None are given this will be inferred from ncols and the number of filters. :param figsize: Size of the figure. A default based on ncols and nrows will be used if None is given. :param filters: Which bands to plot. Will use default filters if None is given. :param kwargs: Additional keyword arguments to pass in the Plotter methods. Available in the online documentation under at `redback.plotting.Plotter`. `print(Transient.plot_multiband.__doc__)` to see all options! :return: The axes. 
""" if self.data_mode not in ['flux_density', 'magnitude']: raise ValueError( f'You cannot plot multiband data with {self.data_mode} data mode . Why are you doing this?') if self.magnitude_data: plotter = MagnitudePlotter(transient=self, filters=filters, filename=filename, outdir=outdir, nrows=nrows, ncols=ncols, figsize=figsize, **kwargs) elif self.flux_density_data: plotter = FluxDensityPlotter(transient=self, filters=filters, filename=filename, outdir=outdir, nrows=nrows, ncols=ncols, figsize=figsize, **kwargs) else: return return plotter.plot_multiband(figure=figure, axes=axes, save=save, show=show) def plot_lightcurve( self, model: callable, filename: str = None, outdir: str = None, axes: matplotlib.axes.Axes = None, save: bool = True, show: bool = True, random_models: int = 100, posterior: pd.DataFrame = None, model_kwargs: dict = None, **kwargs: None) -> matplotlib.axes.Axes: """ :param model: The model used to plot the lightcurve. :param filename: The output filename. Otherwise, use default which starts with the name attribute and ends with *lightcurve.png. :param axes: Axes to plot in if given. :param save:Whether to save the plot. :param show: Whether to show the plot. :param random_models: Number of random posterior samples plotted faintly. (Default value = 100) :param posterior: Posterior distribution to which to draw samples from. Is optional but must be given. :param outdir: Out directory in which to save the plot. Default is the current working directory. :param model_kwargs: Additional keyword arguments to be passed into the model. :param kwargs: Additional keyword arguments to pass in the Plotter methods. Available in the online documentation under at `redback.plotting.Plotter`. `print(Transient.plot_lightcurve.__doc__)` to see all options! :return: The axes. 
""" if self.flux_data: plotter = IntegratedFluxPlotter( transient=self, model=model, filename=filename, outdir=outdir, posterior=posterior, model_kwargs=model_kwargs, random_models=random_models, **kwargs) elif self.luminosity_data: plotter = LuminosityPlotter( transient=self, model=model, filename=filename, outdir=outdir, posterior=posterior, model_kwargs=model_kwargs, random_models=random_models, **kwargs) elif self.flux_density_data: plotter = FluxDensityPlotter( transient=self, model=model, filename=filename, outdir=outdir, posterior=posterior, model_kwargs=model_kwargs, random_models=random_models, **kwargs) elif self.magnitude_data: plotter = MagnitudePlotter( transient=self, model=model, filename=filename, outdir=outdir, posterior=posterior, model_kwargs=model_kwargs, random_models=random_models, **kwargs) else:
<filename>src/build/android/gyp/extract_unwind_tables.py #!/usr/bin/env python # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Extracts the unwind tables in from breakpad symbol files Runs dump_syms on the given binary file and extracts the CFI data into the given output file. The output file is a binary file containing CFI rows ordered based on function address. The output file only contains rows that match the most popular rule type in CFI table, to reduce the output size and specify data in compact format. See doc https://github.com/google/breakpad/blob/master/docs/symbol_files.md. 1. The CFA rules should be of postfix form "SP <val> +". 2. The RA rules should be of postfix form "CFA <val> + ^". Note: breakpad represents dereferencing address with '^' operator. The output file has 2 tables UNW_INDEX and UNW_DATA, inspired from ARM EHABI format. The first table contains function addresses and an index into the UNW_DATA table. The second table contains one or more rows for the function unwind information. The output file starts with 4 bytes counting the number of entries in UNW_INDEX. Then UNW_INDEX table and UNW_DATA table. UNW_INDEX contains two columns of N rows each, where N is the number of functions. 1. First column 4 byte rows of all the function start address as offset from start of the binary, in sorted order. 2. For each function addr, the second column contains 2 byte indices in order. The indices are offsets (in count of 2 bytes) of the CFI data from start of UNW_DATA. The last entry in the table always contains CANT_UNWIND index to specify the end address of the last function. UNW_DATA contains data of all the functions. Each function data contains N rows. The data found at the address pointed from UNW_INDEX will be: 2 bytes: N - number of rows that belong to current function. N * 4 bytes: N rows of data. 
16 bits : Address offset from function start. 14 bits : CFA offset / 4. 2 bits : RA offset / 4. The function is not added to the unwind table in following conditions: C1. If length of the function code (number of instructions) is greater than 0xFFFF (2 byte address span). This is because we use 16 bits to refer to offset of instruction from start of the address. C2. If the function moves the SP by more than 0xFFFF bytes. This is because we use 14 bits to denote CFA offset (last 2 bits are 0). C3. If the Return Address is stored at an offset >= 16 from the CFA. Some functions which have variable arguments can have offset upto 16. TODO(ssid): We can actually store offset 16 by subtracting 1 from RA/4 since we never have 0. C4: Some functions do not have unwind information defined in dwarf info. These functions have index value CANT_UNWIND(0xFFFF) in UNW_INDEX table. Usage: extract_unwind_tables.py --input_path [root path to unstripped chrome.so] --output_path [output path] --dump_syms_path [path to dump_syms binary] """ import argparse import re import struct import subprocess import sys import tempfile _CFA_REG = '.cfa' _RA_REG = '.ra' _ADDR_ENTRY = 0 _LENGTH_ENTRY = 1 _CANT_UNWIND = 0xFFFF def _Write4Bytes(output_file, val): """Writes a 32 bit unsigned integer to the given output file.""" output_file.write(struct.pack('<L', val)); def _Write2Bytes(output_file, val): """Writes a 16 bit unsigned integer to the given output file.""" output_file.write(struct.pack('<H', val)); def _FindRuleForRegister(cfi_row, reg): """Returns the postfix expression as string for a given register. Breakpad CFI row format specifies rules for unwinding each register in postfix expression form separated by space. Each rule starts with register name and a colon. Eg: "CFI R1: <rule> R2: <rule>". 
""" out = [] found_register = False for part in cfi_row: if found_register: if part[-1] == ':': break out.append(part) elif part == reg + ':': found_register = True return ' '.join(out) def _GetCfaAndRaOffset(cfi_row): """Returns a tuple with 2 numbers (cfa_offset, ra_offset). Returns right values if rule matches the predefined criteria. Returns (0, 0) otherwise. The criteria for CFA rule is postfix form "SP <val> +" and RA rule is postfix form "CFA -<val> + ^". """ cfa_offset = 0 ra_offset = 0 cfa_rule = _FindRuleForRegister(cfi_row, _CFA_REG) ra_rule = _FindRuleForRegister(cfi_row, _RA_REG) if cfa_rule and re.match(r'sp [0-9]+ \+', cfa_rule): cfa_offset = int(cfa_rule.split()[1], 10) if ra_rule: if not re.match(r'.cfa -[0-9]+ \+ \^', ra_rule): return (0, 0) ra_offset = -1 * int(ra_rule.split()[1], 10) return (cfa_offset, ra_offset) def _GetAllCfiRows(symbol_file): """Returns parsed CFI data from given symbol_file. Each entry in the cfi data dictionary returned is a map from function start address to array of function rows, starting with FUNCTION type, followed by one or more CFI rows. """ cfi_data = {} current_func = [] for line in symbol_file: if 'STACK CFI' not in line: continue parts = line.split() data = {} if parts[2] == 'INIT': # Add the previous function to the output if len(current_func) > 1: cfi_data[current_func[0][_ADDR_ENTRY]] = current_func current_func = [] # The function line is of format "STACK CFI INIT <addr> <length> ..." data[_ADDR_ENTRY] = int(parts[3], 16) data[_LENGTH_ENTRY] = int(parts[4], 16) # Condition C1: Skip if length is large. if data[_LENGTH_ENTRY] == 0 or data[_LENGTH_ENTRY] > 0xffff: continue # Skip the current function. else: # The current function is skipped. if len(current_func) == 0: continue # The CFI row is of format "STACK CFI <addr> .cfa: <expr> .ra: <expr> ..." data[_ADDR_ENTRY] = int(parts[2], 16) (data[_CFA_REG], data[_RA_REG]) = _GetCfaAndRaOffset(parts) # Condition C2 and C3: Skip based on limits on offsets. 
if data[_CFA_REG] == 0 or data[_RA_REG] >= 16 or data[_CFA_REG] > 0xffff: current_func = [] continue assert data[_CFA_REG] % 4 == 0 # Since we skipped functions with code size larger than 0xffff, we should # have no function offset larger than the same value. assert data[_ADDR_ENTRY] - current_func[0][_ADDR_ENTRY] < 0xffff if data[_ADDR_ENTRY] == 0: # Skip current function, delete all previous entries. current_func = [] continue assert data[_ADDR_ENTRY] % 2 == 0 current_func.append(data) # Condition C4: Skip function without CFI rows. if len(current_func) > 1: cfi_data[current_func[0][_ADDR_ENTRY]] = current_func return cfi_data def _WriteCfiData(cfi_data, out_file): """Writes the CFI data in defined format to out_file.""" # Stores the final data that will be written to UNW_DATA table, in order # with 2 byte items. unw_data = [] # Represent all the CFI data of functions as set of numbers and map them to an # index in the |unw_data|. This index is later written to the UNW_INDEX table # for each function. This map is used to find index of the data for functions. data_to_index = {} # Store mapping between the functions to the index. func_addr_to_index = {} previous_func_end = 0 for addr, function in sorted(cfi_data.iteritems()): # Add an empty function entry when functions CFIs are missing between 2 # functions. if previous_func_end != 0 and addr - previous_func_end > 4: func_addr_to_index[previous_func_end + 2] = _CANT_UNWIND previous_func_end = addr + cfi_data[addr][0][_LENGTH_ENTRY] assert len(function) > 1 func_data_arr = [] func_data = 0 # The first row contains the function address and length. The rest of the # rows have CFI data. Create function data array as given in the format. for row in function[1:]: addr_offset = row[_ADDR_ENTRY] - addr cfa_offset = (row[_CFA_REG]) | (row[_RA_REG] / 4) func_data_arr.append(addr_offset) func_data_arr.append(cfa_offset) # Consider all the rows in the data as one large integer and add it as a key # to the |data_to_index|. 
for data in func_data_arr: func_data = (func_data << 16) | data row_count = len(func_data_arr) / 2 if func_data not in data_to_index: # When data is not found, create a new index = len(unw_data), and write # the data to |unw_data|. index = len(unw_data) data_to_index[func_data] = index unw_data.append(row_count) for row in func_data_arr: unw_data.append(row) else: # If the data was found, then use the same index for the function. index = data_to_index[func_data] assert row_count == unw_data[index] func_addr_to_index[addr] = data_to_index[func_data] # Mark the end end of last function entry. func_addr_to_index[previous_func_end + 2] = _CANT_UNWIND # Write the size of UNW_INDEX file in bytes. _Write4Bytes(out_file, len(func_addr_to_index)) # Write the UNW_INDEX table. First list of addresses and then indices.
availability also modified work.last_update_time. assert (datetime.datetime.utcnow() - work.last_update_time) < datetime.timedelta(seconds=2) # make a staff (admin interface) edition. its fields should supercede all others below it # except when it has no contributors, and they do. pool2.suppressed = False staff_edition = self._edition(data_source_name=DataSource.LIBRARY_STAFF, with_license_pool=False, authors=[]) staff_edition.title = u"The Staff Title" staff_edition.primary_identifier = pool2.identifier # set edition's authorship to "nope", and make sure the lower-priority # editions' authors don't get clobbered staff_edition.contributions = [] staff_edition.author = Edition.UNKNOWN_AUTHOR staff_edition.sort_author = Edition.UNKNOWN_AUTHOR work.calculate_presentation(search_index_client=index) # The title of the Work got superceded. eq_("The Staff Title", work.title) # The author of the Work is still the author of edition2 and was not clobbered. eq_("<NAME>, <NAME>", work.author) eq_("Adder, Alice ; Bitshifter, Bob", work.sort_author) def test_calculate_presentation_with_no_presentation_edition(self): # Calling calculate_presentation() on a work with no # presentation edition won't do anything, but at least it doesn't # crash. work = self._work() work.presentation_edition = None work.coverage_records = [] self._db.commit() work.calculate_presentation() # The work is not presentation-ready. eq_(False, work.presentation_ready) # Work was done to choose the presentation edition, but since no # presentation edition was found, no other work was done. [choose_edition] = work.coverage_records eq_(WorkCoverageRecord.CHOOSE_EDITION_OPERATION, choose_edition.operation) def test_calculate_presentation_sets_presentation_ready_based_on_content(self): # This work is incorrectly presentation-ready; its presentation # edition has no language. 
work = self._work(with_license_pool=True) edition = work.presentation_edition edition.language = None eq_(True, work.presentation_ready) work.calculate_presentation() eq_(False, work.presentation_ready) # Give it a language, and it becomes presentation-ready again. edition.language = "eng" work.calculate_presentation() eq_(True, work.presentation_ready) def test_calculate_presentation_uses_default_audience_set_as_collection_setting(self): default_audience = Classifier.AUDIENCE_ADULT collection = self._default_collection collection.default_audience = default_audience edition, pool = self._edition( DataSource.GUTENBERG, Identifier.GUTENBERG_ID, collection=collection, with_license_pool=True, with_open_access_download=True ) work = self._slow_work(presentation_edition=edition) work.last_update_time = None work.presentation_ready = True work.calculate_presentation() eq_(default_audience, work.audience) def test__choose_summary(self): # Test the _choose_summary helper method, called by # calculate_presentation(). class Mock(Work): def set_summary(self, summary): if isinstance(summary, Resource): self.summary_text = summary.representation.content else: self.summary_text = summary w = Mock() w.the_summary = "old summary" self._db.add(w) m = w._choose_summary # If no summaries are available, any old summary is cleared out. m([], [], []) eq_(None, w.summary_text) # Create three summaries on two identifiers. source1 = DataSource.lookup(self._db, DataSource.OVERDRIVE) source2 = DataSource.lookup(self._db, DataSource.BIBLIOTHECA) i1 = self._identifier() l1, ignore = i1.add_link( Hyperlink.DESCRIPTION, None, source1, content="ok summary" ) good_summary = "This summary is great! It's more than one sentence long and features some noun phrases." i1.add_link( Hyperlink.DESCRIPTION, None, source2, content=good_summary ) i2 = self._identifier() i2.add_link( Hyperlink.DESCRIPTION, None, source2, content="not too bad" ) # Now we can test out the rules for choosing summaries. 
# In a choice between all three summaries, good_summary is # chosen based on textual characteristics. m([], [i1.id, i2.id], []) eq_(good_summary, w.summary_text) m([i1.id, i2.id], [], []) eq_(good_summary, w.summary_text) # If an identifier is associated directly with the work, its # summaries are considered first, and the other identifiers # are not considered at all. m([i2.id], [object(), i1.id], []) eq_("not too bad", w.summary_text) # A summary that comes from a preferred data source will be # chosen over some other summary. m([i1.id, i2.id], [], [source1]) eq_("ok summary", w.summary_text) # But if there is no summary from a preferred data source, the # normal rules apply. source3 = DataSource.lookup(self._db, DataSource.AXIS_360) m([i1.id], [], [source3]) eq_(good_summary, w.summary_text) # LIBRARY_STAFF is always considered a good source of # descriptions. l1.data_source = DataSource.lookup( self._db, DataSource.LIBRARY_STAFF ) m([i1.id, i2.id], [], []) eq_(l1.resource.representation.content, w.summary_text) def test_set_presentation_ready_based_on_content(self): work = self._work(with_license_pool=True) search = MockExternalSearchIndex() # This is how the work will be represented in the dummy search # index. index_key = (search.works_index, MockExternalSearchIndex.work_document_type, work.id) presentation = work.presentation_edition work.set_presentation_ready_based_on_content(search_index_client=search) eq_(True, work.presentation_ready) # The work has not been added to the search index. eq_([], search.docs.keys()) # But the work of adding it to the search engine has been # registered. def assert_record(): # Verify the search index WorkCoverageRecord for this work # is in the REGISTERED state. [record] = [ x for x in work.coverage_records if x.operation==WorkCoverageRecord.UPDATE_SEARCH_INDEX_OPERATION ] eq_(WorkCoverageRecord.REGISTERED, record.status) assert_record() # This work is presentation ready because it has a title. 
# Remove the title, and the work stops being presentation # ready. presentation.title = None work.set_presentation_ready_based_on_content(search_index_client=search) eq_(False, work.presentation_ready) # The search engine WorkCoverageRecord is still in the # REGISTERED state, but its meaning has changed -- the work # will now be _removed_ from the search index, rather than # updated. assert_record() # Restore the title, and everything is fixed. presentation.title = u"foo" work.set_presentation_ready_based_on_content(search_index_client=search) eq_(True, work.presentation_ready) # Remove the medium, and the work stops being presentation ready. presentation.medium = None work.set_presentation_ready_based_on_content(search_index_client=search) eq_(False, work.presentation_ready) presentation.medium = Edition.BOOK_MEDIUM work.set_presentation_ready_based_on_content(search_index_client=search) eq_(True, work.presentation_ready) # Remove the language, and it stops being presentation ready. presentation.language = None work.set_presentation_ready_based_on_content(search_index_client=search) eq_(False, work.presentation_ready) presentation.language = 'eng' work.set_presentation_ready_based_on_content(search_index_client=search) eq_(True, work.presentation_ready) # Remove the fiction status, and the work is still # presentation ready. Fiction status used to make a difference, but # it no longer does. work.fiction = None work.set_presentation_ready_based_on_content(search_index_client=search) eq_(True, work.presentation_ready) def test_assign_genres_from_weights(self): work = self._work() # This work was once classified under Fantasy and Romance. work.assign_genres_from_weights({Romance : 1000, Fantasy : 1000}) self._db.commit() before = sorted((x.genre.name, x.affinity) for x in work.work_genres) eq_([(u'Fantasy', 0.5), (u'Romance', 0.5)], before) # But now it's classified under Science Fiction and Romance. 
work.assign_genres_from_weights({Romance : 100, Science_Fiction : 300}) self._db.commit() after = sorted((x.genre.name, x.affinity) for x in work.work_genres) eq_([(u'Romance', 0.25), (u'Science Fiction', 0.75)], after) def test_classifications_with_genre(self): work = self._work(with_open_access_download=True) identifier = work.presentation_edition.primary_identifier genres = self._db.query(Genre).all() subject1 = self._subject(type="type1", identifier="subject1") subject1.genre = genres[0] subject2 = self._subject(type="type2", identifier="subject2") subject2.genre = genres[1] subject3 = self._subject(type="type2", identifier="subject3") subject3.genre = None source = DataSource.lookup(self._db, DataSource.AXIS_360) classification1 = self._classification( identifier=identifier, subject=subject1, data_source=source, weight=1) classification2 = self._classification( identifier=identifier, subject=subject2, data_source=source, weight=2) classification3 = self._classification( identifier=identifier, subject=subject3, data_source=source, weight=2) results = work.classifications_with_genre().all() eq_([classification2, classification1], results) def test_mark_licensepools_as_superceded(self): # A commercial LP that somehow got superceded will be # un-superceded. commercial = self._licensepool( None, data_source_name=DataSource.OVERDRIVE ) work, is_new = commercial.calculate_work() commercial.superceded = True work.mark_licensepools_as_superceded() eq_(False, commercial.superceded) # An open-access LP that was superceded will be un-superceded if # chosen. 
gutenberg = self._licensepool( None, data_source_name=DataSource.GUTENBERG, open_access=True, with_open_access_download=True ) work, is_new = gutenberg.calculate_work() gutenberg.superceded = True work.mark_licensepools_as_superceded() eq_(False, gutenberg.superceded) # Of two open-access LPs, the one from the higher-quality data # source will be un-superceded, and the one from the # lower-quality data source will be superceded. standard_ebooks = self._licensepool( None, data_source_name=DataSource.STANDARD_EBOOKS, open_access=True, with_open_access_download=True ) work.license_pools.append(standard_ebooks) gutenberg.superceded = False standard_ebooks.superceded = True work.mark_licensepools_as_superceded() eq_(True, gutenberg.superceded) eq_(False, standard_ebooks.superceded) # Of three open-access pools, 1 and only 1 will be chosen as non-superceded. gitenberg1 = self._licensepool(edition=None, open_access=True, data_source_name=DataSource.PROJECT_GITENBERG, with_open_access_download=True ) gitenberg2 = self._licensepool(edition=None, open_access=True, data_source_name=DataSource.PROJECT_GITENBERG, with_open_access_download=True ) gutenberg1 = self._licensepool(edition=None, open_access=True, data_source_name=DataSource.GUTENBERG, with_open_access_download=True ) work_multipool = self._work(presentation_edition=None) work_multipool.license_pools.append(gutenberg1) work_multipool.license_pools.append(gitenberg2) work_multipool.license_pools.append(gitenberg1) # pools aren't yet aware of each other eq_(gutenberg1.superceded, False) eq_(gitenberg1.superceded, False) eq_(gitenberg2.superceded, False) # make pools figure out who's best work_multipool.mark_licensepools_as_superceded() eq_(gutenberg1.superceded, True) # There's no way to choose between the two gitenberg pools, # so making sure only one has been chosen is enough. 
chosen_count = 0 for chosen_pool in gutenberg1, gitenberg1, gitenberg2: if chosen_pool.superceded is False: chosen_count += 1; eq_(chosen_count, 1) # throw wrench in gitenberg1.suppressed = True # recalculate bests work_multipool.mark_licensepools_as_superceded() eq_(gutenberg1.superceded, True) eq_(gitenberg1.superceded, True) eq_(gitenberg2.superceded, False) # A suppressed pool won't be superceded if it's the only pool for a work. only_pool = self._licensepool( None, open_access=True, with_open_access_download=True ) work, ignore = only_pool.calculate_work() only_pool.suppressed = True work.mark_licensepools_as_superceded() eq_(False, only_pool.superceded) def test_work_remains_viable_on_pools_suppressed(self): """ If a work has all of its pools suppressed, the work's author, title, and subtitle still have the last best-known info in them. """ (work, pool_std_ebooks, pool_git, pool_gut, edition_std_ebooks, edition_git, edition_gut, alice, bob) = self._sample_ecosystem() # make sure the setup is what we expect eq_(pool_std_ebooks.suppressed, False) eq_(pool_git.suppressed, False) eq_(pool_gut.suppressed, False) # sanity check - we like standard ebooks and it got determined to be the best eq_(work.presentation_edition, pool_std_ebooks.presentation_edition) eq_(work.presentation_edition, edition_std_ebooks) # editions know who's the presentation edition eq_(edition_std_ebooks.work, work) eq_(edition_git.work, None) eq_(edition_gut.work, None) # The title of the Work is the title of its presentation edition. eq_("The Standard Ebooks Title", work.title) eq_("The Standard Ebooks Subtitle", work.subtitle) # The author of the Work
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import import os import logging import urllib from ast import literal_eval as make_tuple from calendar import month_name from celery import shared_task import requests from django.conf import settings from mmw.settings import layer_classmaps from apps.modeling.geoprocessing import multi, parse from apps.modeling.tr55.utils import aoi_resolution from apps.modeling.tasks import run_gwlfe from apps.modeling.mapshed.tasks import (NOCACHE, collect_data, convert_data, nlcd_streams, ) from apps.geoprocessing_api.calcs import (animal_population, point_source_pollution, catchment_water_quality, stream_data, streams_for_huc12s, huc12s_with_aois, drexel_fast_zonal, ) logger = logging.getLogger(__name__) DRB = 'drb' RWD_HOST = os.environ.get('RWD_HOST', 'localhost') RWD_PORT = os.environ.get('RWD_PORT', '5000') ACRES_PER_SQM = 0.000247105 CM_PER_MM = 0.1 M_PER_CM = 0.01 @shared_task def start_rwd_job(location, snapping, simplify, data_source): """ Calls the Rapid Watershed Delineation endpoint that is running in the Docker container, and returns the response unless there is an out-of-watershed error which raises an exception. """ lat, lng = location end_point = 'rwd' if data_source == DRB else 'rwd-nhd' rwd_url = 'http://%s:%s/%s/%f/%f' % (RWD_HOST, RWD_PORT, end_point, lat, lng) params = {} # The Webserver defaults to enable snapping, uses 1 (true) 0 (false) if not snapping: params['snapping'] = 0 # RWD also defaults to simplify the shape according to a tolerance. # Passing it `?simplify=0` returns the unsimplified result. 
if simplify is not False: params['simplify'] = simplify query_string = urllib.urlencode(params) if query_string: rwd_url += ('?%s' % query_string) logger.debug('rwd request: %s' % rwd_url) response_json = requests.get(rwd_url).json() if 'error' in response_json: raise Exception(response_json['error']) return response_json @shared_task def analyze_streams(results, area_of_interest): """ Given geoprocessing results with stream data and an area of interest, returns the streams and stream order within it. """ return {'survey': stream_data(results, area_of_interest)} @shared_task def analyze_animals(area_of_interest): """ Given an area of interest, returns the animal population within it. """ return {'survey': animal_population(area_of_interest)} @shared_task def analyze_pointsource(area_of_interest): """ Given an area of interest, returns point sources of pollution within it. """ return {'survey': point_source_pollution(area_of_interest)} @shared_task def analyze_catchment_water_quality(area_of_interest): """ Given an area of interest in the DRB, returns catchment water quality data within it. 
""" return {'survey': catchment_water_quality(area_of_interest)} @shared_task(throws=Exception) def analyze_nlcd(result, area_of_interest=None, nlcd_year='2011_2011'): if 'error' in result: raise Exception('[analyze_nlcd_{}] {}'.format( nlcd_year, result['error'])) pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1 result = parse(result) histogram = {} total_ara = 0 total_count = 0 categories = [] def area(dictionary, key, default=0): return dictionary.get(key, default) * pixel_width * pixel_width # Convert results to histogram, calculate total for key, count in result.iteritems(): nlcd, ara = key total_count += count total_ara += count if ara == 1 else 0 histogram[nlcd] = count + histogram.get(nlcd, 0) has_ara = total_ara > 0 for nlcd, (code, name) in layer_classmaps.NLCD.iteritems(): categories.append({ 'area': area(histogram, nlcd), 'active_river_area': area(result, (nlcd, 1)) if has_ara else None, 'code': code, 'coverage': float(histogram.get(nlcd, 0)) / total_count, 'nlcd': nlcd, 'type': name, }) return { 'survey': { 'name': 'land_{}'.format(nlcd_year), 'displayName': 'Land Use/Cover {} (NLCD{})'.format( nlcd_year[5:], nlcd_year[2:4]), 'categories': categories, } } @shared_task(throws=Exception) def analyze_soil(result, area_of_interest=None): if 'error' in result: raise Exception('[analyze_soil] {}'.format(result['error'])) pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1 histogram = {} total_count = 0 categories = [] # Convert results to histogram, calculate total for key, count in result.iteritems(): total_count += count s = make_tuple(key[4:]) # Change {"List(1)":5} to {1:5} s = s if s != settings.NODATA else 3 # Map NODATA to 3 histogram[s] = count + histogram.get(s, 0) for soil, (code, name) in layer_classmaps.SOIL.iteritems(): categories.append({ 'area': histogram.get(soil, 0) * pixel_width * pixel_width, 'code': code, 'coverage': float(histogram.get(soil, 0)) / total_count, 'type': name, }) return { 
'survey': { 'name': 'soil', 'displayName': 'Soil', 'categories': categories, } } @shared_task(throws=Exception) def analyze_climate(result, wkaoi): """ Given the result of multigeoprocessing call for climate rasters, combines them so that the 'ppt' values are grouped together and 'tmean' together. Each group is a dictionary where the keys are strings of the month '1', '2', ..., '12', and the values the average in the area of interest. Then, transforms these dictionaries into a final result of the format used for all other Analyze operations. The 'categories' contain twelve objects, one for each month, with a 'month' field containing the name of the month, and 'ppt' and 'tmean' fields with corresponding values. The 'index' can be used for sorting purposes on the client side. """ if 'error' in result: raise Exception('[analyze_climate] {}'.format(result['error'])) ppt = {k[5:]: v['List(0)'] for k, v in result[wkaoi].items() if 'ppt' in k} tmean = {k[7:]: v['List(0)'] for k, v in result[wkaoi].items() if 'tmean' in k} categories = [{ 'monthidx': i, 'month': month_name[i], 'ppt': ppt[str(i)] * CM_PER_MM, 'tmean': tmean[str(i)], } for i in xrange(1, 13)] return { 'survey': { 'name': 'climate', 'displayName': 'Climate', 'categories': categories } } @shared_task def analyze_terrain(result): """ Given a geoprocessing result in the shape of: [ { "avg": 2503.116786250801, "max": 10501.0, "min": -84.0 }, { "avg": 2.708598957407307, "max": 44.52286911010742, "min": 0.0 } ] Assumes the first result is for Elevation in cm and the second for Slope in %, and transforms it into a dictionary of the shape: [ { "elevation": 25.03116786250801, "slope": 2.708598957407307, "type": "average" }, { "elevation": -0.84, "slope": 0.0, "type": "minimum" }, { "elevation": 105.01, "slope": 44.52286911010742, "type": "maximum" } ] which has Elevation in m and keeps Slope in %. 
""" if 'error' in result: raise Exception('[analyze_terrain] {}'.format(result['error'])) [elevation, slope] = result def cm_to_m(x): return x * M_PER_CM if x else None categories = [ dict(type='average', elevation=cm_to_m(elevation['avg']), slope=slope['avg']), dict(type='minimum', elevation=cm_to_m(elevation['min']), slope=slope['min']), dict(type='maximum', elevation=cm_to_m(elevation['max']), slope=slope['max']) ] return { 'survey': { 'name': 'terrain', 'displayName': 'Terrain', 'categories': categories } } @shared_task def analyze_protected_lands(result, area_of_interest=None): if 'error' in result: raise Exception('[analyze_protected_lands] {}'.format(result['error'])) pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1 result = parse(result) histogram = {} total_count = 0 categories = [] for key, count in result.iteritems(): total_count += count histogram[key] = count + histogram.get(key, 0) for class_id, (code, name) in layer_classmaps.PROTECTED_LANDS.iteritems(): categories.append({ 'area': histogram.get(class_id, 0) * pixel_width * pixel_width, 'class_id': class_id, 'code': code, 'coverage': float(histogram.get(class_id, 0)) / total_count, 'type': name, }) return { 'survey': { 'name': 'protected_lands', 'displayName': 'Protected Lands', 'categories': categories, } } @shared_task def analyze_drb_2100_land(area_of_interest, key): result = drexel_fast_zonal(area_of_interest, key) histogram = {} total_count = 0 categories = [] for nlcd, count in result.iteritems(): total_count += count histogram[nlcd] = count + histogram.get(nlcd, 0) for nlcd, (code, name) in layer_classmaps.NLCD.iteritems(): categories.append({ 'area': histogram.get(nlcd, 0), 'code': code, 'coverage': float(histogram.get(nlcd, 0)) / total_count, 'nlcd': nlcd, 'type': name, }) return { 'survey': { 'name': 'drb_2100_land_{}'.format(key), 'displayName': 'DRB 2100 land forecast ({})'.format(key), 'categories': categories, } } def collect_nlcd(histogram, geojson=None): """ 
Convert raw NLCD geoprocessing result to area dictionary """ pixel_width = aoi_resolution(geojson) if geojson else 1 categories = [{ 'area': histogram.get(nlcd, 0) * pixel_width * pixel_width, 'code': code, 'nlcd': nlcd, 'type': name, } for nlcd, (code, name) in layer_classmaps.NLCD.iteritems()] return {'categories': categories} @shared_task def collect_worksheet_aois(result, shapes): """ Given a geoprocessing result of NLCD and NLCD+Streams for every area of interest within every HUC-12, processes the raw results and returns a dictionary a area of interest IDs corresponding to their processed results. """ if 'error' in result: raise Exception('[collect_worksheet_aois] {}' .format(result['error'])) NULL_RESULT = {'nlcd_streams': {}, 'nlcd': {}} collection = {} for shape in shapes: output = result.get(shape['id'], NULL_RESULT) nlcd = collect_nlcd(parse(output['nlcd']), shape['shape']) streams = stream_data(nlcd_streams(output['nlcd_streams']), shape['shape']) collection[shape['id']] = {'nlcd': nlcd, 'streams': streams} return collection @shared_task def collect_worksheet_wkaois(result, shapes): """ Given a geoprocessing result of MapShed and a list of HUC-12s, processes the raw results through GWLFE and returns a dictionary of WKAOIs to the modeled results, and also the processed NLCD and NLCD+Streams. 
""" if 'error' in result: raise Exception('[collect_worksheet_wkaois] {}' .format(result['error'])) collection = {} for shape in shapes: wkaoi = shape['id'] geojson = shape['shape'] converted = convert_data(result, wkaoi) histogram = converted[0]['n_count'] collected = collect_data(converted, geojson) modeled = run_gwlfe(collected, None, None) collection[wkaoi] = { 'mapshed': collected, 'gwlfe': modeled, 'nlcd': collect_nlcd(histogram, geojson), 'streams': stream_data(nlcd_streams(result[wkaoi]['nlcd_streams']), geojson) } return collection @shared_task(time_limit=300) def collect_worksheet(area_of_interest): """ Given an area of interest, matches it to HUC-12s and generates a dictionary containing land and stream analysis for the matched AoIs, land and stream analysis for the matched HUC-12s, and GWLF-E results for the HUC-12s. This dictionary can be POSTed to /export/worksheet to generate an Excel worksheet containing these values, which can be used for further modeling. """ def to_aoi_id(m): return '{}-{}'.format(NOCACHE, m['wkaoi']) matches = huc12s_with_aois(area_of_interest) huc12_ids = [m['huc12'] for m in matches] streams = streams_for_huc12s(huc12_ids)[0] aoi_shapes = [{
of graph in which to situate the agent to ''' self.bookkeep(agent) node.agent_content = agent self.node_from_agent_id_[agent.agent_id_system] = node def sample(self, phrase, generation=0): '''Sample the agent management system according to a named sampler Parameters ---------- phrase : str Name of the sampling to perform generation : int, optional Meta data about when the agent management system is sampled. If not specified set to zero. Returns ------- xxx ''' if not phrase in self.sampler: raise KeyError('Agent Management System lacks sampler for ' + \ '%s' %(phrase)) else: the_sampler = self.sampler[phrase] return the_sampler(self, generation) def switch_node_content(self, node_1, node_2, switch_agent=True, switch_aux=True): '''Switch content of a pair of nodes Parameters ---------- node_1 : Node The first node to operate on in the switch node_2 : Node The second node to operate on in the switch switch_agent : bool, optional If True, the agents of the pair of nodes should be interchanged. If a node does not contain an agent, instead the empty None, that empty space is interchanged. If False, the agents of the nodes are not interchanged. switch_aux : bool, optional If True, the local environments of the pair of nodes should be interchanged. If a node does not contain a local environment, instead the empty None, the empty spots are interchanged. If False, the local environments are left as is. 
''' if switch_agent: agent_1 = node_1.agent_content agent_2 = node_2.agent_content node_1.agent_content = agent_2 node_2.agent_content = agent_1 if not agent_1 is None: self.node_from_agent_id_[agent_1.agent_id_system] = node_2 if not agent_2 is None: self.node_from_agent_id_[agent_2.agent_id_system] = node_1 if switch_aux: aux_1 = node_1.aux_content aux_2 = node_2.aux_content node_1.aux_content = aux_2 node_2.aux_content = aux_1 def compel(self, agent, phrase, validate_lawbook=False): '''Verb for the agent management system to execute a Compulsion Notes ----- The method collects the Compulsion associated with the input phrase and compels the given agent accordingly Parameters ---------- agent : Agent Agent to be compelled phrase : str Name of the compulsion to execute validate_lawbook : bool, optional If True, validate that Compulsion of given phrase should be possible to apply to given agent. Raises ------ KeyError If system contains no compulsion with the phrase TypeError If it is not an agent that is compelled RuntimeError If the agent does not have the phrase in the law book ''' if not isinstance(agent, Agent): raise TypeError('Only instances of the Agent class can be ' + \ 'compelled by Agent System') if not phrase in self.compulsion: raise KeyError('Agent System lacks compulsion for %s' %(phrase)) else: the_compulsion = self.compulsion[phrase] if validate_lawbook: if not phrase in self.lawbook[agent.agent_id_system]: raise RuntimeError('Compulsion %s is not in law book ' %(phrase) + \ 'for agent with ID %s' %(agent.agent_id_system)) did_it_compel = the_compulsion(agent.agent_id_system) if self.strict_engine and (not did_it_compel is True): raise did_it_compel agent.apply_map(the_compulsion.scaffold_map_output) return did_it_compel def mutate(self, agent, phrase, validate_lawbook=False): '''Verb for the agent management system to execute a Mutation or MultiMutation Notes ----- The method collects the Mutation (or MultiMutation) associated with the input 
phrase and compels the given agent accordingly Parameters ---------- agent : Agent Agent to be mutated phrase : str Name of the mutation to execute validate_lawbook : bool, optional If True, validate that Mutation of given phrase should be possible to apply to given agent. Raises ------ KeyError If system contains no mutation with the phrase TypeError If it is not an agent that is mutated RuntimeError If the agent does not have the phrase in the law book ''' if not isinstance(agent, Agent): raise TypeError('Only instances of the Agent class can be ' + \ 'mutated by Agent System') if not phrase in self.mutation: raise KeyError('Agent System lacks mutation for %s' %(phrase)) else: the_mutation = self.mutation[phrase] if validate_lawbook: if not phrase in self.lawbook[agent.agent_id_system]: raise RuntimeError('Compulsion %s is not in law book ' %(phrase) + \ 'for agent with ID %s' %(agent.agent_id_system)) did_it_mutate = the_mutation(agent.agent_id_system) if self.strict_engine and (not did_it_mutate is True): raise did_it_mutate agent.apply_map(the_mutation.scaffold_map_output) return did_it_mutate def engage_all_verbs(self, agent, validate_lawbook=False): '''Convenience function to apply all verbs to the given agent Parameters ---------- agent : Agent The agent to apply verbs to validate_lawbook : bool, optional If True, the system law book is used to selectively apply only laws that have jurisdiction over the given agent ''' ret = True for law_type in self.law: for phrase, law in self.law[law_type].items(): if validate_lawbook: if not phrase in self.lawbook[agent.agent_id_system]: continue if law_type == 'compulsion': ret_tmp = self.compel(agent, phrase, validate_lawbook) ret = ret and ret_tmp elif law_type == 'mutation': ret_tmp = self.mutate(agent, phrase, validate_lawbook) ret = ret and ret_tmp def make_lawbook_entry(self, law_phrases, agent_name_selector=None, agent_type_selector=None, agent_ids=None): '''Enter a connection between agents as certain law 
phrases, such that the agent management system can enforce certain laws being applied to certain agents only Parameters ---------- law_phrases : Iterable Collection of phrases, or names, of laws that have been added to the system agent_name_selector : callable, optional Function that receives an agent name and returns either True or False, where the former is interpreted as that the given law phrases apply to the corresponding set of agents of the system agent_type_selector : set of Classes, optional Child Agent Classes so that only agents of one of these child classes is associated with the give law phrase agent_ids : iterable, optional Collection of agent IDs, strings, for which the given law phrases apply. Notes ----- At least one of the `agent_name_selector` or `agent_ids` has to be given. The method updates the law book, hence the method can be called multiple times in order to fully populate the law book. If an agent matches no law phrase, its entry is None. The law book is not required and is not enforced unless relevant methods, such as `compel` and `mutate`, are explicitly instructed to do so. 
''' for agent in self.cycle_nodes(True, len(self)): word = self.lawbook.setdefault(agent.agent_id_system, None) if not agent_name_selector is None: if agent_name_selector(agent.name): word = law_phrases elif not agent_type_selector is None: if type(agent, agent_type_selector): word = law_phrases elif not agent_ids is None: if agent.agent_id_system in agent_ids: word = law_phrases else: raise ValueError('One of agent_name_selector or agent_ids must be set') self.lawbook[agent.agent_id_system] = word def move(self, code_name): '''Move the system according to a Mover Parameters ---------- code_name : str Name of mover to apply to system Raises ------ KeyError If the agent system manager has no mover of given code name ''' if not code_name in self.mover: raise KeyError('AMS lacks mover with code name %s' %(code_name)) else: the_mover = self.mover[code_name] the_mover.move_by(self) def get_n_nodes(self): '''Return number of nodes in agent graph''' return nx.number_of_nodes(self.agents_graph) def get_n_edges(self): '''Return number of edges in agent graph''' return nx.number_of_edges(self.agents_graph) def get_n_agents(self): '''Return number of agents in the system''' return len(self.agents_in_scope) def __len__(self): '''Return number of agents in the system''' return self.get_n_agents() def __init__(self, name, agents, full_agents_graph=None, agent_env=None, common_env=None, strict_engine=False): self.name = name self.strict_engine = strict_engine # # The agent to agent network relation is defined, which is a complete # graph in case nothing specific is given. 
# if full_agents_graph is None: if not agent_env is None: if (not isinstance(agent_env, Iterable)) or isinstance(agent_env, str): agent_envs = [agent_env] * len(agents) else: if len(agent_env) != len(agents): raise ValueError('An iterable of agent environments ' + \ 'of wrong length %s' %(str(len(agent_env)))) agent_envs = agent_env else: agent_envs = [None] * len(agents) nodes = [] for k, (agent, agent_env) in enumerate(zip(agents, agent_envs)): nodes.append(Node('agent_%s'%(str(k)), agent, agent_env)) self.agents_graph = nx.complete_graph(nodes) # # If a network is given it is assumed to contain all objects, and hence # swapped directly into the graph attribute # else: if isinstance(full_agents_graph, nx.Graph): self.agents_graph = full_agents_graph else: raise TypeError('Agent Management System given graph not ' + \ 'of the Graph class') self.agents_graph.name = 'Agents Graph of System %s' %(self.name) self.common_env = common_env # # The agents are added to the system book keeping # self.agents_in_scope = OrderedDict() for agent in agents: self.bookkeep(agent) # #
2.0), (33, 80, 3.0, 5.5), (34, 80, 5.5, 6.5)]) testing_lib.add_chords_to_sequence( expected_sequence, [('N.C.', 0), ('F', 1), ('C', 4)]) self.assertProtoEquals(expected_sequence, sequences[0]) class OneHotDrumsConverterTest(BaseOneHotDataTest, tf.test.TestCase): def setUp(self): sequence = music_pb2.NoteSequence() sequence.tempos.add(qpm=60) testing_lib.add_track_to_sequence( sequence, 0, [(35, 100, 0, 10), (44, 55, 1, 2), (40, 45, 4, 5), (35, 45, 9, 10), (40, 45, 13, 13), (55, 120, 16, 18), (60, 100, 16, 17), (53, 99, 19, 20)], is_drum=True) testing_lib.add_track_to_sequence( sequence, 1, [(35, 55, 1, 2), (40, 45, 25, 26), (55, 120, 28, 30), (60, 100, 28, 29), (53, 99, 31, 33)], is_drum=True) self.sequence = sequence expected_unsliced_events = [ (1, 5, NO_DRUMS, NO_DRUMS, 2, NO_DRUMS, NO_DRUMS, NO_DRUMS), (NO_DRUMS, 1, NO_DRUMS, NO_DRUMS, NO_DRUMS, 2, NO_DRUMS, NO_DRUMS, 160, NO_DRUMS, NO_DRUMS, 256), (NO_DRUMS, 2, NO_DRUMS, NO_DRUMS, 160, NO_DRUMS, NO_DRUMS, 256) ] self.expected_unsliced_labels = [ np.array(es) for es in expected_unsliced_events] expected_sliced_events = [ (1, 5, NO_DRUMS, NO_DRUMS, 2, NO_DRUMS, NO_DRUMS, NO_DRUMS), (NO_DRUMS, 1, NO_DRUMS, NO_DRUMS, NO_DRUMS, 2, NO_DRUMS, NO_DRUMS), (NO_DRUMS, 2, NO_DRUMS, NO_DRUMS, 160, NO_DRUMS, NO_DRUMS, 256) ] self.expected_sliced_labels = [ np.array(es) for es in expected_sliced_events] self.converter_class = data.DrumsConverter def testMaxOutputsPerNoteSequence(self): converter = data.DrumsConverter( steps_per_quarter=1, slice_bars=1, max_tensors_per_notesequence=2) self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs)) converter.max_tensors_per_notesequence = 3 self.assertEqual(3, len(converter.to_tensors(self.sequence).inputs)) converter.max_tensors_per_notesequence = 100 self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs)) def testIsTraining(self): converter = data.DrumsConverter( steps_per_quarter=1, slice_bars=1, max_tensors_per_notesequence=2) converter.set_mode('train') 
self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs)) converter.max_tensors_per_notesequence = None self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs)) def testToNoteSequence(self): converter = data.DrumsConverter( steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=1) tensors = converter.to_tensors( filter_instrument(self.sequence, 1)) sequences = converter.to_notesequences(tensors.outputs) self.assertEqual(1, len(sequences)) expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220) expected_sequence.tempos.add(qpm=120) testing_lib.add_track_to_sequence( expected_sequence, 9, [(38, 80, 0.5, 1.0), (48, 80, 2.0, 2.5), (49, 80, 2.0, 2.5), (51, 80, 3.5, 4.0)], is_drum=True) self.assertProtoEquals(expected_sequence, sequences[0]) class RollInputsOneHotDrumsConverterTest(OneHotDrumsConverterTest): def labels_to_inputs(self, labels, converter): inputs = [] for label_arr in labels: input_ = np.zeros((len(label_arr), converter.input_depth), converter.input_dtype) for i, l in enumerate(label_arr): if l == converter.end_token: input_[i, -2] = 1 elif l == 0: input_[i, -1] = 1 else: j = 0 while l: input_[i, j] = l % 2 l >>= 1 j += 1 assert np.any(input_[i]), label_arr.astype(np.int) inputs.append(input_) return inputs def setUp(self): super(RollInputsOneHotDrumsConverterTest, self).setUp() self.converter_class = functools.partial( data.DrumsConverter, roll_input=True) class RollOutputsDrumsConverterTest(BaseDataTest, tf.test.TestCase): def setUp(self): sequence = music_pb2.NoteSequence() sequence.tempos.add(qpm=60) testing_lib.add_track_to_sequence( sequence, 0, [(35, 100, 0, 10), (35, 55, 1, 2), (44, 55, 1, 2), (40, 45, 4, 5), (35, 45, 9, 10), (40, 45, 13, 13), (55, 120, 16, 18), (60, 100, 16, 17), (53, 99, 19, 20), (40, 45, 33, 34), (55, 120, 36, 37), (60, 100, 36, 37), (53, 99, 39, 42)], is_drum=True) testing_lib.add_track_to_sequence( sequence, 1, [(35, 100, 5, 10), (35, 55, 6, 8), (44, 55, 7, 9)], is_drum=False) 
self.sequence = sequence def testSliced(self): expected_sliced_events = [ ([0], [0, 2], [], [], [1], [], [], []), ([], [0], [], [], [], [1], [], []), ([], [1], [], [], [5, 7], [], [], [8]), ] expected_silent_array = np.array([ [0, 0, 1, 1, 0, 1, 1, 1], [1, 0, 1, 1, 1, 0, 1, 1], [1, 0, 1, 1, 0, 1, 1, 0], ]) expected_output_tensors = np.zeros( (len(expected_sliced_events), 8, len(data.REDUCED_DRUM_PITCH_CLASSES)), np.bool) for i, events in enumerate(expected_sliced_events): for j, e in enumerate(events): expected_output_tensors[i, j, e] = 1 converter = data.DrumsConverter( pitch_classes=data.REDUCED_DRUM_PITCH_CLASSES, slice_bars=2, steps_per_quarter=1, roll_input=True, roll_output=True, max_tensors_per_notesequence=None) self.assertEqual(10, converter.input_depth) self.assertEqual(9, converter.output_depth) tensors = converter.to_tensors(self.sequence) self.assertArraySetsEqual( np.append( expected_output_tensors, np.expand_dims(expected_silent_array, axis=2), axis=2), tensors.inputs) self.assertArraySetsEqual(expected_output_tensors, tensors.outputs) def testToNoteSequence(self): converter = data.DrumsConverter( pitch_classes=data.REDUCED_DRUM_PITCH_CLASSES, slice_bars=None, gap_bars=None, steps_per_quarter=1, roll_input=True, roll_output=True, max_tensors_per_notesequence=None) tensors = converter.to_tensors(self.sequence) sequences = converter.to_notesequences(tensors.outputs) self.assertEqual(1, len(sequences)) expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220) expected_sequence.tempos.add(qpm=120) testing_lib.add_track_to_sequence( expected_sequence, 0, [(36, 80, 0, 0.5), (42, 80, 0.5, 1.0), (36, 80, 0.5, 1.0), (38, 80, 2.0, 2.5), (36, 80, 4.5, 5.0), (38, 80, 6.5, 7.0), (48, 80, 8.0, 8.5), (49, 80, 8.0, 8.5), (51, 80, 9.5, 10.0), (38, 80, 16.5, 17.0), (48, 80, 18.0, 18.5), (49, 80, 18.0, 18.5), (51, 80, 19.5, 20.0)], is_drum=True) for n in expected_sequence.notes: n.instrument = 9 self.assertProtoEquals(expected_sequence, sequences[0]) class 
TrioConverterTest(BaseDataTest, tf.test.TestCase): def setUp(self): sequence = music_pb2.NoteSequence() sequence.tempos.add(qpm=60) # Mel 1, coverage bars: [3, 9] / [2, 9] testing_lib.add_track_to_sequence( sequence, 1, [(51, 1, 13, 37)]) # Mel 2, coverage bars: [1, 3] / [0, 4] testing_lib.add_track_to_sequence( sequence, 2, [(52, 1, 4, 16)]) # Bass, coverage bars: [0, 1], [4, 6] / [0, 7] testing_lib.add_track_to_sequence( sequence, 3, [(50, 1, 2, 5), (49, 1, 16, 25)]) # Drum, coverage bars: [0, 2], [6, 7] / [0, 3], [5, 8] testing_lib.add_track_to_sequence( sequence, 4, [(35, 1, 0, 1), (40, 1, 4, 5), (35, 1, 9, 9), (35, 1, 25, 25), (40, 1, 29, 29)], is_drum=True) # Chords. testing_lib.add_chords_to_sequence( sequence, [('C', 4), ('Am', 16), ('G', 32)]) for n in sequence.notes: if n.instrument == 1: n.program = 0 elif n.instrument == 2: n.program = 10 elif n.instrument == 3: n.program = 33 self.sequence = sequence m1 = np.array( [NO_EVENT] * 13 + [30] + [NO_EVENT] * 23 + [NOTE_OFF] + [NO_EVENT] * 2, np.int32) + 2 m2 = np.array( [NO_EVENT] * 4 + [31] + [NO_EVENT] * 11 + [NOTE_OFF] + [NO_EVENT] * 23, np.int32) + 2 b = np.array( [NO_EVENT, NO_EVENT, 29, NO_EVENT, NO_EVENT, NOTE_OFF] + [NO_EVENT] * 10 + [28] + [NO_EVENT] * 8 + [NOTE_OFF] + [NO_EVENT] * 14, np.int32) + 2 d = ([1, NO_DRUMS, NO_DRUMS, NO_DRUMS, 2, NO_DRUMS, NO_DRUMS, NO_DRUMS, NO_DRUMS, 1, NO_DRUMS, NO_DRUMS] + [NO_DRUMS] * 12 + [NO_DRUMS, 1, NO_DRUMS, NO_DRUMS, NO_DRUMS, 2, NO_DRUMS, NO_DRUMS] + [NO_DRUMS] * 4) c = [NO_CHORD, NO_CHORD, NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'Am', 'G', 'G', 'G', 'G'] expected_sliced_sets = [ ((2, 4), (m1, b, d)), ((5, 7), (m1, b, d)), ((6, 8), (m1, b, d)), ((0, 2), (m2, b, d)), ((1, 3), (m2, b, d)), ((2, 4), (m2, b, d)), ] self.expected_sliced_labels = [ np.stack([l[i*4:j*4] for l in x]) for (i, j), x in expected_sliced_sets] chord_encoding = 
mm.MajorMinorChordOneHotEncoding() expected_sliced_chord_events = [ c[i*4:j*4] for (i, j), _ in expected_sliced_sets] self.expected_sliced_chord_labels = [ np.array([chord_encoding.encode_event(e) for e in es]) for es in expected_sliced_chord_events] def testSliced(self): converter = data.TrioConverter( steps_per_quarter=1, gap_bars=1, slice_bars=2, max_tensors_per_notesequence=None) tensors = converter.to_tensors(self.sequence) self.assertArraySetsEqual(tensors.inputs, tensors.outputs) actual_sliced_labels = [ np.stack(np.argmax(s, axis=-1) for s in np.split(t, [90, 180], axis=-1)) for t in tensors.outputs] self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels) def testSlicedChordConditioned(self): converter = data.TrioConverter( steps_per_quarter=1, gap_bars=1, slice_bars=2, max_tensors_per_notesequence=None, chord_encoding=mm.MajorMinorChordOneHotEncoding()) tensors = converter.to_tensors(self.sequence) self.assertArraySetsEqual(tensors.inputs, tensors.outputs) actual_sliced_labels = [ np.stack(np.argmax(s, axis=-1) for s in np.split(t, [90, 180], axis=-1)) for t in tensors.outputs] actual_sliced_chord_labels = [ np.argmax(t, axis=-1) for t in tensors.controls] self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels) self.assertArraySetsEqual( self.expected_sliced_chord_labels, actual_sliced_chord_labels) def testToNoteSequence(self): converter = data.TrioConverter( steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=1) mel_oh = data.np_onehot(self.expected_sliced_labels[3][0], 90) bass_oh = data.np_onehot(self.expected_sliced_labels[3][1], 90) drums_oh = data.np_onehot(self.expected_sliced_labels[3][2], 512) output_tensors = np.concatenate([mel_oh, bass_oh, drums_oh], axis=-1) sequences = converter.to_notesequences([output_tensors]) self.assertEqual(1, len(sequences)) self.assertProtoEquals( """ ticks_per_quarter: 220 tempos < qpm: 120 > notes < instrument: 0 pitch: 52 start_time: 2.0 end_time: 4.0 program: 
0 velocity: 80 > notes < instrument: 1 pitch: 50 start_time: 1.0 end_time: 2.5 program: 33 velocity: 80 > notes < instrument: 9 pitch: 36 start_time: 0.0 end_time: 0.5 velocity: 80 is_drum: True > notes < instrument: 9 pitch: 38 start_time: 2.0 end_time: 2.5 velocity: 80 is_drum: True > total_time: 4.0 """, sequences[0]) def testToNoteSequenceChordConditioned(self): converter = data.TrioConverter( steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=1, chord_encoding=mm.MajorMinorChordOneHotEncoding()) mel_oh = data.np_onehot(self.expected_sliced_labels[3][0], 90) bass_oh = data.np_onehot(self.expected_sliced_labels[3][1], 90) drums_oh = data.np_onehot(self.expected_sliced_labels[3][2], 512) chords_oh = data.np_onehot(self.expected_sliced_chord_labels[3], 25) output_tensors = np.concatenate([mel_oh, bass_oh, drums_oh], axis=-1) sequences = converter.to_notesequences([output_tensors], [chords_oh]) self.assertEqual(1, len(sequences)) self.assertProtoEquals( """ ticks_per_quarter: 220 tempos < qpm: 120 > notes < instrument: 0 pitch: 52 start_time: 2.0 end_time: 4.0 program: 0 velocity: 80 > notes < instrument: 1 pitch: 50 start_time: 1.0 end_time: 2.5 program: 33 velocity: 80 > notes < instrument: 9 pitch: 36 start_time: 0.0 end_time: 0.5 velocity: 80 is_drum: True > notes < instrument: 9 pitch: 38 start_time: 2.0 end_time: 2.5 velocity: 80 is_drum: True > text_annotations < text: 'N.C.' annotation_type: CHORD_SYMBOL > text_annotations < time: 2.0 text: 'C' annotation_type: CHORD_SYMBOL > total_time: 4.0 """, sequences[0]) class GrooveConverterTest(tf.test.TestCase): def initialize_sequence(self): sequence = music_pb2.NoteSequence() sequence.ticks_per_quarter = 240 sequence.tempos.add(qpm=120) sequence.time_signatures.add(numerator=4, denominator=4) return sequence def setUp(self): self.one_bar_sequence = self.initialize_sequence() self.two_bar_sequence = self.initialize_sequence() self.tap_sequence
<gh_stars>1-10 # Developed by <NAME> and <NAME> on 1/21/19 6:29 PM. # Last modified 1/21/19 6:29 PM # Copyright (c) 2019. All rights reserved. import itertools import time from enum import Enum from math import floor from typing import List from PyQt5.QtCore import pyqtSignal, Qt, QEvent, QObject, QTimer, QSize, pyqtSlot, QRect, QPoint from PyQt5.QtGui import QPalette, QImage, QPixmap, QMouseEvent, QResizeEvent, QCloseEvent, QPaintEvent, QPainter, \ QPolygon, QCursor, QColor, QBrush, QPainterPath, QPolygonF from PyQt5.QtWidgets import QScrollArea, QLabel, QSizePolicy, QMainWindow, QDockWidget, QWidget, QPushButton, \ QHBoxLayout, QApplication, QStyle import numpy as np from numpy import ma from openspectra.image import Image, BandDescriptor from openspectra.openspecrtra_tools import RegionOfInterest from openspectra.openspectra_file import OpenSpectraHeader from openspectra.utils import LogHelper, Logger, Singleton class ColorPicker(metaclass=Singleton): def __init__(self): self.__colors = [Qt.red, Qt.green, Qt.blue, Qt.cyan, Qt.yellow, Qt.magenta, Qt.gray, Qt.darkRed, Qt.darkGreen, Qt.darkBlue, Qt.darkCyan, Qt.darkMagenta, Qt.darkYellow, Qt.darkGray, Qt.lightGray] self.__index = 0 def current_color(self) -> QColor: return self.__colors[self.__index] def next_color(self) -> QColor: self.__index += 1 if self.__index >= len(self.__colors): self.reset() return self.__colors[self.__index] def reset(self): self.__index = 0 class AdjustedMouseEvent(QObject): def __init__(self, event:QMouseEvent, x_scale:float, y_scale:float): super().__init__(None) self.__event = event self.__pixel_x = floor(event.x() * x_scale) self.__pixel_y = floor(event.y() * y_scale) self.__pixel_pos = (self.__pixel_x, self.__pixel_y) def mouse_event(self) -> QMouseEvent: return self.__event def pixel_x(self) -> int: return self.__pixel_x def pixel_y(self) -> int: return self.__pixel_y def pixel_pos(self) -> (int, int): return self.__pixel_pos class RegionDisplayItem(QObject): toggled = 
pyqtSignal(QObject) closed = pyqtSignal(QObject) def __init__(self, x_zoom_factor: float, y_zoom_factor:float, color:QColor, is_on:bool, painter_path:QPainterPath=None, points:List[QPoint]=None): super().__init__(None) self.__x_zoom_factor = x_zoom_factor self.__y_zoom_factor = y_zoom_factor self.__color = color self.__is_on = is_on self.__painter_path = painter_path self.__points = points def painter_path(self) -> QPainterPath: return self.__painter_path def points(self) -> List[QPoint]: return self.__points def append_points(self, points:List[QPoint]): self.__points.extend(points) def color(self) -> QColor: return self.__color def is_on(self) -> bool: return self.__is_on def set_is_on(self, is_on:bool): self.__is_on = is_on self.toggled.emit(self) def close(self): self.closed.emit(self) def x_zoom_factor(self) -> float: return self.__x_zoom_factor def y_zoom_factor(self) -> float: return self.__y_zoom_factor class WindowCloseEvent(QObject): def __init__(self, target:QMainWindow): super().__init__(None) self.__target = target def target(self) -> QMainWindow: return self.__target class AreaSelectedEvent(QObject): def __init__(self, region:RegionOfInterest, display_item:RegionDisplayItem): super().__init__(None) self.__region = region self.__display_item = display_item def region(self) -> RegionOfInterest: return self.__region def display_item(self) -> RegionDisplayItem: return self.__display_item class ImageResizeEvent(QObject): def __init__(self, image_size:QSize, viewport_size:QSize): super().__init__(None) self.__image_size = image_size self.__viewport_size = viewport_size def image_size(self) -> QSize: return self.__image_size def viewport_size(self) -> QSize: return self.__viewport_size class ViewZoomChangeEvent(QObject): def __init__(self, factor:float, size:QSize): super().__init__(None) self.__factor = factor self.__size = size def factor(self) -> float: return self.__factor def size(self) -> QSize: return self.__size class ViewLocationChangeEvent(QObject): 
def __init__(self, center:QPoint): super().__init__(None) self.__center = center def center(self) -> QPoint: return self.__center def scale(self, scale_factor:float): self.__center *= scale_factor class ViewChangeEvent(QObject): def __init__(self, center:QPoint, size:QSize): super().__init__(None) self.__center = center self.__size = size def size(self) -> QSize: return self.__size def center(self) -> QPoint: return self.__center def scale(self, scale_factor:float): self.__center *= scale_factor self.__size *= scale_factor class MouseCoordinates(QLabel): __LOG:Logger = LogHelper.logger("MouseCoordinates") def __init__(self, map_info:OpenSpectraHeader.MapInfo=None, parent=None): super().__init__(parent) self.__map_info = map_info if self.__map_info is not None: self.__formatter = " sample: {0} line: {1}, x: {2:.3f}, y: {3:.3f}" else: self.__formatter = " sample: {0} line: {1}" @pyqtSlot(AdjustedMouseEvent) def on_mouse_move(self, event:AdjustedMouseEvent): # users are accustom to screen coordinates being 1 based if self.__map_info is not None: coords = self.__map_info.calculate_coordinates(event.pixel_x(), event.pixel_y()) self.setText(self.__formatter.format(event.pixel_x() + 1, event.pixel_y() + 1, coords[0], coords[1])) else: self.setText(" sample: {0} line: {1}".format( event.pixel_x() + 1, event.pixel_y() + 1)) class ZoomWidget(QWidget): zoom_in = pyqtSignal() zoom_out = pyqtSignal() reset_zoom = pyqtSignal() def __init__(self, parent=None, initial_zoom:float=1.0): super().__init__(parent) self.__factor_label = QLabel() self.__factor_label.setFixedHeight(12) self.__factor_label.setFixedWidth(40) self.set_zoom_label(initial_zoom) layout = QHBoxLayout() layout.setContentsMargins(0, 0, 0, 0) layout.setAlignment(Qt.AlignRight) layout.addWidget(self.__factor_label) self.setLayout(layout) def set_zoom_label(self, new_zoom_factor:float): self.__factor_label.setText("{:5.2f}".format(new_zoom_factor)) class ReversePixelCalculator(): """For a given pixel selected on a 
zoomed in image get_points returns all of the pixels that should be drawn on the zoomed image to cover the same area as would be in the 1 to 1 image""" __LOG: Logger = LogHelper.logger("ReversePixelCalculator") def __init__(self, x_size:int, y_size:int, x_zoom_factor:float, y_zoom_factor:float): if x_zoom_factor < 1.0 or y_zoom_factor < 1.0: raise ValueError("Zoom factors should be at least 1.0 or greater") self.update_params(x_size, y_size, x_zoom_factor, y_zoom_factor) def update_params(self, x_size:int, y_size:int, x_zoom_factor:float, y_zoom_factor:float): self.__x_max = x_size self.__y_max = y_size self.__x_zoom = x_zoom_factor self.__y_zoom = y_zoom_factor def get_points(self, x:int, y:int) -> List[QPoint]: if x >= self.__x_max: raise ValueError("x value must be less than {0}".format(self.__x_max)) if y >= self.__y_max: raise ValueError("y value must be less than {0}".format(self.__y_max)) x_mapped = floor(x/self.__x_zoom) y_mapped = floor(y/self.__y_zoom) x_min = x_max = x x_val = x - 1 while floor(x_val/self.__x_zoom) == x_mapped: x_min = x_val x_val = x_val - 1 x_val = x + 1 while floor(x_val/self.__x_zoom) == x_mapped: x_max = x_val x_val = x_val + 1 y_min = y_max = y y_val = y - 1 while floor(y_val/self.__y_zoom) == y_mapped: y_min = y_val y_val = y_val - 1 y_val = y + 1 while floor(y_val/self.__y_zoom) == y_mapped: y_max = y_val y_val = y_val + 1 point_list:List[QPoint] = list() for x_point in range(x_min, x_max + 1): for y_point in range(y_min, y_max + 1): point_list.append(QPoint(x_point, y_point)) ReversePixelCalculator.__LOG.debug("x_map: {0}, y_map: {1}, x_min: {2}, x_max: {3}, y_min: {4}, y_max: {5}". 
format(x_mapped, y_mapped, x_min, x_max, y_min, y_max)) return point_list class ImageLabel(QLabel): __LOG:Logger = LogHelper.logger("ImageLabel") class Action(Enum): Nothing = 0 Dragging = 1 Drawing = 2 Picking = 3 # on double click we get both clicked and doubleClicked # decide if we need both and fix area_selected = pyqtSignal(AreaSelectedEvent) left_clicked = pyqtSignal(AdjustedMouseEvent) right_clicked = pyqtSignal(AdjustedMouseEvent) double_clicked = pyqtSignal(AdjustedMouseEvent) mouse_move = pyqtSignal(AdjustedMouseEvent) locator_moved = pyqtSignal(ViewLocationChangeEvent) def __init__(self, image_descriptor:BandDescriptor, location_rect:bool=True, pixel_select:bool=False, parent=None): super().__init__(parent) # Install our event filter self.installEventFilter(self) # Image descriptor self.__descriptor = image_descriptor # mouse location self.__last_mouse_loc:QPoint = None # Parameters related to the image size self.__initial_size:QSize = None self.__width_scale_factor = 1.0 self.__height_scale_factor = 1.0 # Cursors we'll use self.__default_cursor = self.cursor() self.__drag_cursor = QCursor(Qt.ClosedHandCursor) self.__draw_cursor = QCursor(Qt.CrossCursor) self.__pick_cursor = QCursor(Qt.PointingHandCursor) # Initialize the locator if we have one if location_rect: # Initial size doesn't really matter, # it will get adjusted based on the zoom window size self.__locator_rect = QRect(0, 0, 50, 50) else: self.__locator_rect = None # The list of regions of interest self.__region_display_items:List[RegionDisplayItem] = list() # Color picker for region of interest displays self.__color_picker = ColorPicker() # Polygon selection items self.__polygon:QPolygon = None self.__polygon_bounds:QRect = None # Pixel selection items self.__pixel_select:bool = pixel_select self.__pixel_mapper:ReversePixelCalculator = None self.__pixel_list:np.ndarray = None self.__region_display_item = None self.__current_action = ImageLabel.Action.Nothing def has_locator(self) -> bool: 
return self.__locator_rect is not None def locator_position(self) -> QPoint: assert self.__locator_rect is not None return self.__unscale_point(self.__locator_rect.center()) def set_locator_position(self, postion:QPoint): # calls to this method should always be in 1 to 1 coordinates if self.has_locator(): new_position: QPoint = self.__scale_point(postion) ImageLabel.__LOG.debug("setting locator position: {0}, scaled pos: {1}", postion, new_position) self.__locator_rect.moveCenter(new_position) self.update() def locator_size(self) -> QSize: assert self.__locator_rect is not None return self.__unscale_size(self.__locator_rect.size()) def set_locator_size(self, size:QSize): # calls to this method should always be in 1 to 1 coordinates if self.has_locator(): new_size: QSize = self.__scale_size(size) ImageLabel.__LOG.debug("setting locator size: {0}, scaled size: {1}", size, new_size) self.__locator_rect.setSize(new_size) self.update() @pyqtSlot(AreaSelectedEvent) def add_selected_area(self, event:AreaSelectedEvent): display_item = event.display_item() display_item.toggled.connect(self.__handle_region_toggled) display_item.closed.connect(self.__handle__region_closed) self.__region_display_items.append(display_item) self.update() def remove_all_regions(self): self.__region_display_items.clear() self.__clear_selected_area() self.__color_picker.reset() self.update() def setPixmap(self, pixel_map:QPixmap): locator_size:QSize = None locator_position:QPoint = None # If zoom changed we need to end any pixel selecting in progress if self.__current_action == ImageLabel.Action.Picking: self.__end_pixel_select() if self.has_locator(): locator_size = self.locator_size() locator_position = self.locator_position() super().setPixmap(pixel_map) ImageLabel.__LOG.debug("super().setPixmap called") size = self.pixmap().size() if self.__initial_size is not None: self.__width_scale_factor = size.width() / self.__initial_size.width() self.__height_scale_factor = size.height() / 
self.__initial_size.height() ImageLabel.__LOG.debug("setting image size: {0}, scale factor w: {1}, h: {2}", size, self.__width_scale_factor, self.__height_scale_factor) # reset locator if we have one if self.has_locator(): self.set_locator_size(locator_size) self.set_locator_position(locator_position) # If there's a pixel mapper update it too if self.__pixel_mapper is not None: self.__pixel_mapper.update_params( self.pixmap().size().width(), self.pixmap().size().height(), self.__width_scale_factor, self.__height_scale_factor) self.setMinimumSize(size) self.setMaximumSize(size) def changeEvent(self, event:QEvent): ImageLabel.__LOG.debug("ImageLabel.changeEvent called...") if event.type() == QEvent.ParentChange and self.pixmap() is not None \ and self.__initial_size is None: self.__initial_size = self.pixmap().size() def mouseMoveEvent(self, event:QMouseEvent): if self.__current_action == ImageLabel.Action.Drawing and self.__polygon is not None: # ImageLabel.__LOG.debug("drawing mouse move event, pos: {0}, size: {1}", event.pos(), self.pixmap().size()) self.__polygon << event.pos() self.update() elif self.__current_action == ImageLabel.Action.Dragging and \ self.__last_mouse_loc is not None and self.__locator_rect is not None: # ImageLabel.__LOG.debug("dragging mouse move event, pos: {0}, size: {1}", event.pos(), self.pixmap().size()) center = self.__locator_rect.center() center += event.pos() - self.__last_mouse_loc self.__locator_rect.moveCenter(center) self.__last_mouse_loc = event.pos() self.locator_moved.emit(ViewLocationChangeEvent(self.__unscale_point(center))) self.update() else: # don't emit pixel locations when
# Copyright (C) 2002, <NAME> (<EMAIL>) # Copyright (C) 2017, <NAME> (<EMAIL>) # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. """Calculation of residue depth using command line tool MSMS. This module uses Michel Sanner's MSMS program for the surface calculation. See: http://mgltools.scripps.edu/packages/MSMS Residue depth is the average distance of the atoms of a residue from the solvent accessible surface. Residue Depth:: from Bio.PDB.ResidueDepth import ResidueDepth from Bio.PDB.PDBParser import PDBParser parser = PDBParser() structure = parser.get_structure("1a8o", "Tests/PDB/1A8O.pdb") model = structure[0] rd = ResidueDepth(model) print(rd['A',(' ', 152, ' ')]) Direct MSMS interface, typical use:: from Bio.PDB.ResidueDepth import get_surface surface = get_surface(model) The surface is a Numeric array with all the surface vertices. Distance to surface:: from Bio.PDB.ResidueDepth import min_dist coord = (1.113, 35.393, 9.268) dist = min_dist(coord, surface) where coord is the coord of an atom within the volume bound by the surface (ie. atom depth). To calculate the residue depth (average atom depth of the atoms in a residue):: from Bio.PDB.ResidueDepth import residue_depth chain = model['A'] res152 = chain[152] rd = residue_depth(res152, surface) """ import os import tempfile import warnings import subprocess import numpy from Bio.PDB import PDBParser from Bio.PDB import Selection from Bio.PDB.AbstractPropertyMap import AbstractPropertyMap from Bio.PDB.Polypeptide import is_aa from Bio import BiopythonWarning from Bio import BiopythonDeprecationWarning # PDB_TO_XYZR is a BASH script and will not run on Windows # Since it only reads atmtypenumbers to a mapping structure we can replicate # that functionality here and avoid this dependency altogether. 
# # Description of PDB_TO_XYZR # Maps residue type and atom name pairs into Connolly ".atm" numeric codes # as used in MS and AMS, and into actual radius values # # In case of missing radius, use 0.01 # # Table 1: Atom Type to radius _atomic_radii = { # atom num dist Rexplicit Runited-atom 1: (0.57, 1.40, 1.40), 2: (0.66, 1.40, 1.60), 3: (0.57, 1.40, 1.40), 4: (0.70, 1.54, 1.70), 5: (0.70, 1.54, 1.80), 6: (0.70, 1.54, 2.00), 7: (0.77, 1.74, 2.00), 8: (0.77, 1.74, 2.00), 9: (0.77, 1.74, 2.00), 10: (0.67, 1.74, 1.74), 11: (0.70, 1.74, 1.86), 12: (1.04, 1.80, 1.85), 13: (1.04, 1.80, 1.80), # P, S, and LonePairs 14: (0.70, 1.54, 1.54), # non-protonated nitrogens 15: (0.37, 1.20, 1.20), # H, D hydrogen and deuterium 16: (0.70, 0.00, 1.50), # obsolete entry, purpose unknown 17: (3.50, 5.00, 5.00), # pseudoatom - big ball 18: (1.74, 1.97, 1.97), # Ca calcium 19: (1.25, 1.40, 1.40), # Zn zinc (traditional radius) 20: (1.17, 1.40, 1.40), # Cu copper (traditional radius) 21: (1.45, 1.30, 1.30), # Fe heme iron 22: (1.41, 1.49, 1.49), # Cd cadmium 23: (0.01, 0.01, 0.01), # pseudoatom - tiny dot 24: (0.37, 1.20, 0.00), # hydrogen vanishing if united-atoms 25: (1.16, 1.24, 1.24), # Fe not in heme 26: (1.36, 1.60, 1.60), # Mg magnesium 27: (1.17, 1.24, 1.24), # Mn manganese 28: (1.16, 1.25, 1.25), # Co cobalt 29: (1.17, 2.15, 2.15), # Se selenium 30: (3.00, 3.00, 3.00), # obsolete entry, original purpose unknown 31: (1.15, 1.15, 1.15), # Yb ytterbium +3 ion --- wild guess only 38: (0.95, 1.80, 1.80), # obsolete entry, original purpose unknown } # Table 2: Resname/Aname to Atom Type # MSMS uses an awk/gawk pattern matching strategy that we cannot replicate # We will take advantage of our parser to help us in the mapping. def _get_atom_radius(atom, rtype="united"): """Translate an atom object to an atomic radius defined in MSMS (PRIVATE). Uses information from the parent residue and the atom object to define the atom type. 
Returns the radius (float) according to the selected type: - explicit (reads hydrogens) - united (default) """ if rtype == "explicit": typekey = 1 elif rtype == "united": typekey = 2 else: raise ValueError( "Radius type (%r) not understood. Must be 'explicit' or 'united'" % rtype ) resname = atom.parent.resname het_atm = atom.parent.id[0] at_name = atom.name at_elem = atom.element # Hydrogens if at_elem == "H" or at_elem == "D": return _atomic_radii[15][typekey] # HETATMs elif het_atm == "W" and at_elem == "O": return _atomic_radii[2][typekey] elif het_atm != " " and at_elem == "CA": return _atomic_radii[18][typekey] elif het_atm != " " and at_elem == "CD": return _atomic_radii[22][typekey] elif resname == "ACE" and at_name == "CA": return _atomic_radii[9][typekey] # Main chain atoms elif at_name == "N": return _atomic_radii[4][typekey] elif at_name == "CA": return _atomic_radii[7][typekey] elif at_name == "C": return _atomic_radii[10][typekey] elif at_name == "O": return _atomic_radii[1][typekey] elif at_name == "P": return _atomic_radii[13][typekey] # CB atoms elif at_name == "CB" and resname == "ALA": return _atomic_radii[9][typekey] elif at_name == "CB" and resname in {"ILE", "THR", "VAL"}: return _atomic_radii[7][typekey] elif at_name == "CB": return _atomic_radii[8][typekey] # CG atoms elif at_name == "CG" and resname in { "ASN", "ASP", "ASX", "HIS", "HIP", "HIE", "HID", "HISN", "HISL", "LEU", "PHE", "TRP", "TYR", }: return _atomic_radii[10][typekey] elif at_name == "CG" and resname == "LEU": return _atomic_radii[7][typekey] elif at_name == "CG": return _atomic_radii[8][typekey] # General amino acids in alphabetical order elif resname == "GLN" and at_elem == "O": return _atomic_radii[3][typekey] elif resname == "ACE" and at_name == "CH3": return _atomic_radii[9][typekey] elif resname == "ARG" and at_name == "CD": return _atomic_radii[8][typekey] elif resname == "ARG" and at_name in {"NE", "RE"}: return _atomic_radii[4][typekey] elif resname == "ARG" and 
at_name == "CZ": return _atomic_radii[10][typekey] elif resname == "ARG" and at_name.startswith(("NH", "RH")): return _atomic_radii[5][typekey] elif resname == "ASN" and at_name == "OD1": return _atomic_radii[1][typekey] elif resname == "ASN" and at_name == "ND2": return _atomic_radii[5][typekey] elif resname == "ASN" and at_name.startswith("AD"): return _atomic_radii[3][typekey] elif resname == "ASP" and at_name.startswith(("OD", "ED")): return _atomic_radii[3][typekey] elif resname == "ASX" and at_name.startswith("OD1"): return _atomic_radii[1][typekey] elif resname == "ASX" and at_name == "ND2": return _atomic_radii[3][typekey] elif resname == "ASX" and at_name.startswith(("OD", "AD")): return _atomic_radii[3][typekey] elif resname in {"CYS", "CYX", "CYM"} and at_name == "SG": return _atomic_radii[13][typekey] elif resname in {"CYS", "MET"} and at_name.startswith("LP"): return _atomic_radii[13][typekey] elif resname == "CUH" and at_name == "SG": return _atomic_radii[12][typekey] elif resname == "GLU" and at_name.startswith(("OE", "EE")): return _atomic_radii[3][typekey] elif resname in {"GLU", "GLN", "GLX"} and at_name == "CD": return _atomic_radii[10][typekey] elif resname == "GLN" and at_name == "OE1": return _atomic_radii[1][typekey] elif resname == "GLN" and at_name == "NE2": return _atomic_radii[5][typekey] elif resname in {"GLN", "GLX"} and at_name.startswith("AE"): return _atomic_radii[3][typekey] # Histdines and friends # There are 4 kinds of HIS rings: HIS (no protons), HID (proton on Delta), # HIE (proton on epsilon), and HIP (protons on both) # Protonated nitrogens are numbered 4, else 14 # HIS is treated here as the same as HIE # # HISL is a deprotonated HIS (the L means liganded) elif resname in {"HIS", "HID", "HIE", "HIP", "HISL"} and at_name in {"CE1", "CD2"}: return _atomic_radii[11][typekey] elif resname in {"HIS", "HID", "HIE", "HISL"} and at_name == "ND1": return _atomic_radii[14][typekey] elif resname in {"HID", "HIP"} and at_name in {"ND1", 
"RD1"}: return _atomic_radii[4][typekey] elif resname in {"HIS", "HIE", "HIP"} and at_name in {"NE2", "RE2"}: return _atomic_radii[4][typekey] elif resname in {"HID", "HISL"} and at_name in {"NE2", "RE2"}: return _atomic_radii[14][typekey] elif resname in {"HIS", "HID", "HIP", "HISL"} and at_name.startswith(("AD", "AE")): return _atomic_radii[4][typekey] # More amino acids elif resname == "ILE" and at_name == "CG1": return _atomic_radii[8][typekey] elif resname == "ILE" and at_name == "CG2": return _atomic_radii[9][typekey] elif resname == "ILE" and at_name in {"CD", "CD1"}: return _atomic_radii[9][typekey] elif resname == "LEU" and at_name.startswith("CD"): return _atomic_radii[9][typekey] elif resname == "LYS" and at_name in {"CG", "CD", "CE"}: return _atomic_radii[8][typekey] elif resname == "LYS" and at_name in {"NZ", "KZ"}: return _atomic_radii[6][typekey] elif resname == "MET" and at_name == "SD": return _atomic_radii[13][typekey] elif resname == "MET" and at_name == "CE": return _atomic_radii[9][typekey] elif resname == "PHE" and at_name.startswith(("CD", "CE", "CZ")): return _atomic_radii[11][typekey] elif resname == "PRO" and at_name in {"CG", "CD"}: return _atomic_radii[8][typekey] elif resname == "CSO" and at_name in {"SE", "SEG"}: return _atomic_radii[9][typekey] elif resname == "CSO" and at_name.startswith("OD"): return _atomic_radii[3][typekey] elif resname == "SER" and at_name == "OG": return _atomic_radii[2][typekey] elif resname == "THR" and at_name
#   for logging.
# This object is created in this module, (in init_logging()), it gets
# initialized separately within each thread, and then it is updated
# dynamically, if needed, as the thread progresses.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

global theLoggingContext    # A single global (but thread-local) LoggingContext object.

theLoggingContext = None    # To be properly initialized later (in initLogMaster()).


#=============================================================================
# mainLogger:Logger                             [module public global object]
#
# The main logger for the application.  This is the logger that should
# be used in modules that don't bother to define their own logger (but
# most modules should define a logger, either based on the systemName,
# the appName, or at least the module's own name).
#
# Not initialized when the module is first loaded; instead,
# logmaster.initLogMaster() must be called -- exactly once in the
# program, before any logging capabilities are used -- to set it up.

global mainLogger
mainLogger = None   # Initialized later, in initLogMaster().

# System-level and application-level loggers; likewise initialized
# later by initLogMaster().
global sysLogger, appLogger
sysLogger = None
appLogger = None


#======================
# Private Globals.                                        [code subsection]
#======================

#========================================================================
# initialized:bool                        [module private global variable]
#
# Set to True once the logmaster module has been initialized; used to
# prevent the module from being initialized more than once.  Use the
# initLogMaster() function to initialize the module.  (A plain bool
# rather than a threading flag/event, because nobody else will get the
# opportunity to wait on it anyway.)

global initialized
initialized = False  # Initially false; this module has not yet been initialized.
#=============================================================== # moduleLogger:Logger [module private global object] # # This logger (which is not publicly exported) is # to be used, from within this module only, for log # messages generated by this module. This is also a # model for what other non-application-specific # modules that use logmaster should do to get their # own module-local loggers. global moduleLogger moduleLogger = None # Initialize in init_logging() #================================================================= # _srcfile [module private global variable] # # Copied from logging/__init__.py. This code had to be # copied into this module in order for it to get the value # of __file__ that we want to use here. global _srcfile # # _srcfile is used when walking the stack to check when we've got the first # caller stack frame. # if hasattr(sys, 'frozen'): #support for py2exe # This part is inappropriate for us, _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:]) # not sure how to fix yet. -mpf elif __file__[-4:].lower() in ['.pyc', '.pyo']: _srcfile = __file__[:-4] + '.py' else: _srcfile = __file__ _srcfile = os.path.normcase(_srcfile) #================================================================== # Class definitions. [code section] #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv #============================================================== # Exception classes. [code subsection] #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv #========================================================== # LoggedException [public exception class] # # A LoggedException is an exception such that, # when it is first constructed, a log message # of an appropriate level is automatically # generated and sent to a specified logger. # # The constructor for a LoggedException takes # a logger and a message. 
# The logging level for the message is determined by the subclass of
# LoggedException that is used (see the concrete subclasses defined
# below).
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class LoggedException(Exception):

    """An exception that automatically logs its own creation.

    When a LoggedException is constructed, a log message at an
    appropriate level is generated and sent to a specified logger.
    The level is determined by the subclass used (see .loglevel).
    """

    #=========================================================
    # .defLogger                                [class variable]
    #
    # The default logger to use for exceptions of a given
    # (subclass) type.  NOTE: The None here is just a placeholder
    # for this attribute in this abstract base class.  All concrete
    # derived classes (ones whose constructor will actually be
    # called) MUST override this class variable, or construction
    # will raise a TypeError (before the exception you actually
    # wanted gets raised)!
    #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

    defLogger = None        # No default logger defined for this base class.

    #---------------------------------------------------------
    # .loglevel                                 [class variable]
    #
    # The logging level of the exception.  NOTSET is merely a
    # placeholder in this abstract class; subclasses override it
    # with the value appropriate for their purposes (see the
    # subclasses defined below).
    #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

    loglevel = logging.NOTSET

    def __init__(self, msg: str = None, level=None, logger: logging.Logger = None):
        """Log the exception's occurrence, then initialize it.

        Arguments:
          msg    -- The log/exception message.  If None, a generic
                    message naming the log level is synthesized.
          level  -- Logging level for the message; defaults to the
                    (sub)class's .loglevel.
          logger -- Logger to send the message to; defaults to the
                    (sub)class's .defLogger.

        Raises:
          TypeError -- if no logger is given and the subclass
                       defines no default logger.

        Note that this initial logging of the exception generally
        occurs *before* the exception is actually raised, at the
        point in the raising routine where the constructor is
        called.  The entity that catches the exception may of
        course do additional logging, such as a logger.exception()
        call that also displays a traceback.  We avoid printing
        tracebacks here, since one may not be needed for exceptions
        that are caught and handled appropriately somewhere in the
        calling (i.e., catching) code.
        """
        if logger is None:
            # This reads the class variable of the object's *actual*
            # (most-derived) class, not of this abstract base class.
            logger = self.defLogger

        if logger is None:
            errmsg = ("LoggedException.__init__(): No default logger "
                      "provided for this class of LoggedException.")
            # BUGFIX: moduleLogger is None until this module is
            # initialized; guard to avoid an AttributeError masking
            # the TypeError we actually want to raise.
            if moduleLogger is not None:
                moduleLogger.error(errmsg)
            traceback.print_stack()     # Goes to sys.stderr.
            raise TypeError(errmsg)

        if level is None:
            level = self.loglevel       # Get the derived class's log level.

        if msg is None:
            # BUGFIX: logging._levelNames existed only in Python 2;
            # use the public logging.getLevelName() instead.
            msg = ('Creating a LoggedException at level %s.'
                   % logging.getLevelName(level))

        # BUGFIX: initialize the Exception base class so the message
        # is carried in .args and reported by str(exc).
        super().__init__(msg)

        logger.log(level, msg)

# End class LoggedException


#==========================================================
# InfoException                          [public exception class]
#
# An InfoException, when it is raised at all, is simply a way to
# indicate in which of several normal ways a routine is returning,
# in cases where this information is worth reporting in the log
# file at INFO level.  An InfoException should be immediately
# caught by the caller and not re-raised.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class InfoException(LoggedException):
    loglevel = logging.INFO

# End class InfoException


#==========================================================
# ExitException                          [public exception class]
#
# An ExitException is like an InfoException in that it is reported
# at INFO level; however, it is intended to cause the entire thread
# in which it takes place to terminate.  (Use FatalException to
# cause the entire application PROCESS to terminate.)
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class ExitException(LoggedException):
    """Reported at INFO level; intended to make the entire thread in
    which it occurs terminate.  (Use FatalException to terminate the
    whole application process instead.)"""
    loglevel = logging.INFO

# End class ExitException


#==========================================================
# WarningException                       [public exception class]
#
# When raised at all, these serve simply as a way to exit early
# from a routine -- a sort of alternate return code -- indicating
# some harmless but unexpected circumstance that is worth
# reporting at WARNING level.  They should be caught (and not
# re-raised) at a high level, before they have a chance to
# interfere significantly with overall program flow.  Basically,
# every caller of a routine that might throw a WarningException
# should handle it.
#
# Creating a WarningException automatically generates a log
# message at WARNING level.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class WarningException(LoggedException):
    loglevel = logging.WARNING

# End class WarningException.


#=========================================================
# ErrorException                         [public exception class]
#
# Indicates a fairly serious problem -- one that might prevent us
# from doing (or doing properly) a fairly significant task.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class ErrorException(LoggedException):
    loglevel = logging.ERROR

# End class ErrorException.


#=========================================================
# CriticalException                      [public exception class]
#
# Indicates a very serious problem -- one that might prevent us
# from doing a very important task.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class CriticalException(LoggedException):
    loglevel = logging.CRITICAL

# End class CriticalException


#==========================================================
# FatalException                         [public exception class]
#
# Like a CriticalException, but even worse.
# When this exception is raised, it generally causes the
# application to exit.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

class FatalException(LoggedException):
    """The worst kind of LoggedException; raising one generally
    causes the application to exit."""
    loglevel = logging.FATAL    # logging.FATAL is the same value as CRITICAL.

# End class FatalException

#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys SHORT_VOWELS = ['a', 'E', 'I', 'i', 'O', 'Y', 'u', '9', 'ai', 'ei', 'Ou', '9Y'] LONG_VOWELS = ['a:', 'E:', 'I:', 'i:', 'O:', 'Y:', 'u:', '9:', 'ai:', 'ei:', 'Ou:', '9Y:'] # These mappings are mostly not generally valid, but only in certain contexts. They are rare mappings, filtered out # of the list of all mappings occuring < 20 times in the aligned dictionary, where the corresponding transcripts are # correct, but most of the almost 300 mappings from this list are the results of erroneus transcripts. VALID_MAPPINGS = [('e', 'ei:'), ('ð', 'T'), ('g', 't'), ('a', 'ai:'), ('hl', ''), ('nd', 'm'), ('ts', ''), ('f', 't'), ('sl', 's t l_0'), ('nn', 'm'), ('sd', ''), ('hn', ''), ('mn', ''), ('nk', ''), ('kg', ''), ('tss', ''), ('nl', ''), ('gg', ''), ('', 'n t'), ('nu', 'Yi'), ('hé', 'C ei'), ('ng', ''), ('', 'n_0 t'), ('pp', ''), ('é', 'E'), ('nf', ''), ('zz', 't s'), ('pf', ''), ('gsl', 's t l_0'), ('gk', ''), ('hé', 'C E'), ('hé', 'C E h'), ('pb', ''), ('n', 't'), ('nn', 'J'), ('nn', 'N_0'), ('fn', 'm_0'), ('gn', 'N_0')] class G2P_align: def __init__(self, prondict_list, min_occur=1000): self.g2p_map = {} self.one_on_one_map = self.init_map(prondict_list) self.init_g2p_map(self.one_on_one_map, min_occur) self.add_special_mappings() def init_map(self, p_list): one_on_one_map = {} for line in p_list: word, transcr = line.strip().split('\t') tuples = self.map_g2p_one_on_one(word, transcr) for t in tuples: if t in one_on_one_map: one_on_one_map[t] = one_on_one_map[t] + 1 else: one_on_one_map[t] = 1 return one_on_one_map def map_g2p_one_on_one(self, word, transcript): """ Get basic g2p mapping, only create mappings for word-transcript pairs that are equally long. 
:param word: :param transcript: :return: a list of g2p tuples if mapped, an empty list otherwise """ word_arr = list(word) tr_arr = transcript.split() tuples = [] if len(word_arr) != len(tr_arr): return tuples for ind, c in enumerate(word_arr): tuples.append((c.lower(), tr_arr[ind])) return tuples def init_g2p_map(self, map_to_filter, min_occur): self.g2p_map = {} for t in map_to_filter.keys(): if map_to_filter[t] > min_occur: if t[0] in self.g2p_map: self.g2p_map[t[0]].append(t[1]) else: self.g2p_map[t[0]] = [t[1]] def extend_mapping(self, prondict_list, min_occur=100): extended_g2p_map = {} for line in prondict_list: word, transcr = line.strip().split('\t') aligned = align_g2p(word, transcr, self.g2p_map) for t in aligned: if t in extended_g2p_map: extended_g2p_map[t] = extended_g2p_map[t] + 1 else: extended_g2p_map[t] = 1 self.init_g2p_map(extended_g2p_map, min_occur) self.add_special_mappings() def add_special_mappings(self): if 'i' in self.g2p_map: self.g2p_map['y'] = self.g2p_map['i'] if 'í' in self.g2p_map: self.g2p_map['ý'] = self.g2p_map['í'] # ensure short and long versions of vowels for grapheme in self.g2p_map.keys(): for p in self.g2p_map[grapheme]: if p in SHORT_VOWELS: if p + ':' not in self.g2p_map[grapheme]: self.g2p_map[grapheme].append(p + ':') elif p in LONG_VOWELS: short = p.replace(':', '') if short not in self.g2p_map[grapheme]: self.g2p_map[grapheme].append(short) # manually add dipththongs and other special cases: self.g2p_map['ei'] = ['ei', 'ei:'] self.g2p_map['ey'] = ['ei', 'ei:'] self.g2p_map['au'] = ['9Y', '9Y:'] self.g2p_map['hj'] = ['C'] self.g2p_map['hl'] = ['l_0'] self.g2p_map['hr'] = ['r_0'] self.g2p_map['hn'] = ['n_0'] self.g2p_map['sl'] = ['s t l'] self.g2p_map['tns'] = ['s'] # vatns - /v a s/ self.g2p_map['x'] = ['k s'] self.g2p_map['é'] = ['j E', 'j E:'] def set_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples): graphemes = ''.join(word_arr[g_anchor + 1: g_ind]) phonemes = ' '.join(tr_arr[p_anchor + 
1: p_ind]) if len(graphemes) > 0 or len(phonemes) > 0: g2p_tuples.append((graphemes.lower(), phonemes)) g2p_tuples.append((c.lower(), tr_arr[p_ind])) if len(c) > 1: g_anchor = g_ind + len(c) - 1 else: g_anchor = g_ind p_anchor = p_ind return g_anchor, p_anchor, g2p_tuples def set_triple_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples): graphemes = ''.join(word_arr[g_anchor + 1: g_ind]) phonemes = ' '.join(tr_arr[p_anchor + 1: p_ind]) if len(graphemes) > 0 or len(phonemes) > 0: g2p_tuples.append((graphemes.lower(), phonemes)) g2p_tuples.append((c.lower(), ' '.join(tr_arr[p_ind: p_ind + 3]))) g_anchor = g_ind + 1 p_anchor = p_ind + 2 return g_anchor, p_anchor, g2p_tuples def set_two_phone_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples): graphemes = ''.join(word_arr[g_anchor + 1: g_ind]) phonemes = ' '.join(tr_arr[p_anchor + 1: p_ind]) if len(graphemes) > 0 or len(phonemes) > 0: g2p_tuples.append((graphemes.lower(), phonemes)) g2p_tuples.append((c.lower(), ' '.join(tr_arr[p_ind: p_ind + 2]))) g_anchor = g_ind p_anchor = p_ind + 1 return g_anchor, p_anchor, g2p_tuples def get_diphthong(ind, w_arr): diphthongs = ['ei', 'ey', 'au', 'hj', 'hl', 'hr', 'sl'] if ind < len(w_arr) - 1: pair = w_arr[ind] + w_arr[ind+1] if pair.lower() in diphthongs: return pair.lower() return '' def get_trigram(ind, w_arr): trigrams = ['tns'] if ind < len(w_arr) - 2: trigr_arr = w_arr[ind:ind+3] trigr = ''.join(trigr_arr) if trigr.lower() in trigrams: return trigr.lower() return '' def align_g2p(word, transcript, g2p_map): word_arr = list(word) tr_arr = transcript.split() g2p_tuples = [] g_anchor = -1 p_anchor = -1 p_ind = 0 skip_next = False skip_two = False for g_ind, c in enumerate(word_arr): if skip_next: skip_next = False continue if skip_two: skip_next = True skip_two = False continue c = c.lower() if p_ind < len(tr_arr) and c in g2p_map: if c == 'x' or c == 'é': tmp_phones = ' '.join(tr_arr[p_ind:p_ind + 2]) if tmp_phones in 
g2p_map[c]: g_anchor, p_anchor, g2p_tuples = set_two_phone_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples) p_ind += 2 continue tri = get_trigram(g_ind, word_arr) if len(tri) > 0: c = tri skip_two = True diph = get_diphthong(g_ind, word_arr) if len(diph) > 0: c = diph skip_next = True if c == 'sl': tmp_phones = ' '.join(tr_arr[p_ind:p_ind + 3]) if tmp_phones == 's t l': g_anchor, p_anchor, g2p_tuples = set_triple_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples) p_ind += 3 if tr_arr[p_ind] in g2p_map[c]: g_anchor, p_anchor, g2p_tuples = set_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples) p_ind += 1 elif p_ind + 1 < len(tr_arr) and tr_arr[p_ind + 1] in g2p_map[c]: if g_ind < len(word_arr) - 1 and word_arr[g_ind + 1] in g2p_map and tr_arr[p_ind] in g2p_map[word_arr[g_ind + 1]]: continue p_ind += 1 g_anchor, p_anchor, g2p_tuples = set_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples) p_ind += 1 elif g_ind > p_ind: # if we have more phonemes left than graphemes, do not try to match ahead if len(word_arr) - g_ind > len(tr_arr) - p_ind: continue if g_ind < len(tr_arr) and tr_arr[g_ind] in g2p_map[c]: p_ind = g_ind g_anchor, p_anchor, g2p_tuples = set_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples) p_ind += 1 elif g_ind + 1 < len(tr_arr) and tr_arr[g_ind + 1] in g2p_map[c]: p_ind = g_ind + 1 g_anchor, p_anchor, g2p_tuples = set_alignment(c, word_arr, tr_arr, g_anchor, p_anchor, g_ind, p_ind, g2p_tuples) p_ind += 1 # last grapheme? 
elif g_ind == len(word_arr) - 1: graphemes = ''.join(word_arr[g_anchor + 1:]) phonemes = ' '.join(tr_arr[p_anchor + 1:]) g2p_tuples.append((graphemes.lower(), phonemes)) break elif g_ind < len(tr_arr): p_ind += 1 else: #check the end if p_anchor is not at the end of tr_arr if p_anchor < len(tr_arr) - 1: last_char = word_arr[-1] if last_char in g2p_map and tr_arr[-1] in g2p_map[last_char]: graphemes = ''.join(word_arr[g_anchor + 1:-1]) phonemes = ' '.join(tr_arr[p_anchor + 1:-1]) if len(graphemes) > 0 or len(phonemes) > 0: g2p_tuples.append((graphemes.lower(), phonemes)) g2p_tuples.append((last_char, tr_arr[-1])) break graphemes = ''.join(word_arr[g_anchor + 1:]) phonemes = ' '.join(tr_arr[p_anchor + 1:]) g2p_tuples.append((graphemes.lower(), phonemes)) break if g_anchor < len(word_arr) - 1 or p_anchor < len(tr_arr) - 1: graphemes = ''.join(word_arr[g_anchor + 1:]) phonemes = ' '.join(tr_arr[p_anchor + 1:]) g2p_tuples.append((graphemes.lower(), phonemes)) last_tuple = g2p_tuples[len(g2p_tuples) - 1] #if last_tuple[0] == '' or last_tuple[1] == '': if last_tuple == ('r', '') or last_tuple == ('ð', '') or last_tuple == ('m', '') or last_tuple == ('n', ''): if len(word_arr) > 3 and word_arr[len(word_arr) - 2] != word_arr[len(word_arr) - 1]: g2p_tuples.append(('ERR', 'ERR')) return g2p_tuples def main(): pron_dict_in = open(sys.argv[1]).readlines() g2p = G2P_align(pron_dict_in, 1000) map_size = 0 for e in g2p.g2p_map.keys(): map_size += len(g2p.g2p_map[e]) print("initial map size: " + str(map_size)) map_size = 0 g2p.extend_mapping(pron_dict_in) for e in g2p.g2p_map.keys(): map_size += len(g2p.g2p_map[e]) print("second map size: " + str(map_size)) tmp_g2p_map = {} g2p_map_v2 = {} aligned_dict = [] for line in pron_dict_in: word, transcr = line.strip().split('\t') aligned = align_g2p(word, transcr, g2p.g2p_map) for t in aligned: if t in tmp_g2p_map: tmp_g2p_map[t].append(word + '\t' + transcr) else: tmp_g2p_map[t] = [word + '\t' + transcr] if t in g2p_map_v2: g2p_map_v2[t] 
= g2p_map_v2[t] + 1 else: g2p_map_v2[t] = 1 aligned_dict.append(word + '\t' + transcr + '\t' + str(aligned)) print("map size in the end: " + str(len(g2p_map_v2))) #for al in aligned_dict: # print(str(al)) out = open('alignment_map_train_0628.txt', 'w') out_err = open('errors_in_alignment_0628.txt',
for each cluster. This type of plots is useful to fast assess library quality and batch effects. Parameters ---------- data : ``AnnData`` or ``UnimodalData`` or ``MultimodalData`` object Single cell expression data. groupby : ``str`` A categorical variable in data.obs that is used to categorize the cells, e.g. cell type. condition: ``str`` A categorical variable in data.obs that is used to calculate frequency within each category defined by ``groupby``, e.g. donor. style: ``str``, optional (default: ``frequency``) Composition plot style. Can be either ``frequency``, or ``normalized``. Within each cluster, the ``frequency`` style show the percentage of cells from each ``condition`` within each category in ``groupby`` (stacked), the ``normalized`` style shows for each category in ``groupby`` the percentage of cells that are also in each ``condition`` over all cells that are in the same ``condition`` (not stacked). restrictions: ``str`` or ``List[str]``, optional, default: None A list of restrictions to subset data for plotting. Each restriction takes the format of 'key:value,value...', or 'key:~value,value...'. This restriction selects cells with the ``data.obs[key]`` values belong to 'value,value...' (or not belong to if '~' shows). switch_axes: ``bool``, optional, default: ``False`` By default, X axis is for groupby, and Y axis for frequencies with respect to condition. If this parameter is ``True``, switch the axes. groupby_label: ``str``, optional (default ``None``) Label for the axis displaying ``groupby`` categories. If ``None``, use ``groupby``. sort_function: ``Union[Callable[List[str], List[str]], str]``, optional, default: ``natsorted`` Function used for sorting both groupby and condition labels. If ``natsorted``, apply natsorted function to sort by natural order. If ``None``, don't sort. Otherwise, a callable function will be applied to the labels for sorting. panel_size: ``tuple``, optional (default: ``(6, 4)``) The plot size (width, height) in inches. 
    palette: ``List[str]``, optional (default: ``None``)
        Used for setting colors for categories in ``condition``. Within the list, each string is the color for one category.

    left: ``float``, optional (default: ``0.15``)
        This parameter sets the figure's left margin as a fraction of panel's width (left * panel_size[0]).

    bottom: ``float``, optional (default: ``0.15``)
        This parameter sets the figure's bottom margin as a fraction of panel's height (bottom * panel_size[1]).

    wspace: ``float``, optional (default: ``0.3``)
        This parameter sets the width between panels and also the figure's right margin as a fraction of panel's width (wspace * panel_size[0]).

    hspace: ``float``, optional (default: ``0.15``)
        This parameter sets the height between panels and also the figure's top margin as a fraction of panel's height (hspace * panel_size[1]).

    return_fig: ``bool``, optional, default: ``False``
        Return a ``Figure`` object if ``True``; return ``None`` otherwise.

    dpi: ``float``, optional, default: ``300.0``
        The resolution in dots per inch.
Returns ------- ``Figure`` object A ``matplotlib.figure.Figure`` object containing the dot plot if ``return_fig == True`` Examples -------- >>> fig = pg.compo_plot(data, 'louvain_labels', 'Donor', style = 'normalized') """ if groupby_label is None: groupby_label = groupby fig, ax = _get_subplot_layouts(panel_size=panel_size, dpi=dpi, left=left, bottom=bottom, wspace=wspace, hspace=hspace) # default nrows = 1 & ncols = 1 restr_obj = RestrictionParser(restrictions) restr_obj.calc_default(data) selected = restr_obj.get_satisfied(data) df = pd.crosstab(data.obs.loc[selected, groupby], data.obs.loc[selected, condition]) index_values = df.index.tolist() column_values = df.columns.tolist() if sort_function == "natsorted": sort_function = natsorted if callable(sort_function): index_values = sort_function(index_values) column_values = sort_function(column_values) if switch_axes: index_values.reverse() df = df.reindex(index = index_values, columns = column_values) if style == "frequency": df = df.div(df.sum(axis=1), axis=0) * 100.0 else: assert style == "normalized" df = df.div(df.sum(axis=0), axis=1) * 100.0 if color_unused: if palette is None: color_list = _get_palette(data.obs[condition].cat.categories.size) else: assert len(palette) >= data.obs[condition].cat.categories.size, "The palette provided has fewer colors than needed!" color_idx = df.columns.map(data.obs[condition].cat.categories.get_loc) color_list = palette[color_idx] else: if palette is None: color_list = _get_palette(df.shape[1]) else: assert len(palette) >= df.shape[1], "The palette provided has fewer colors than needed!" 
color_list = palette[0:df.shape[1]] df.plot( kind = "bar" if not switch_axes else "barh", stacked = style == "frequency", legend = False, color = color_list, ax = ax, ) ax.grid(False) if not switch_axes: ax.set_xlabel(groupby_label) ax.set_ylabel("Percentage") else: ax.set_xlabel("Percentage") ax.set_ylabel(groupby_label) ax.legend(loc="center left", bbox_to_anchor=(1.05, 0.5)) if len(max(df.index.astype(str), key=len)) >= 5: ax.set_xticklabels(ax.get_xticklabels(), rotation=-45, ha='left') return fig if return_fig else None def violin( data: Union[MultimodalData, UnimodalData, anndata.AnnData], attrs: Union[str, List[str]], groupby: str, hue: Optional[str] = None, matkey: Optional[str] = None, stripplot: Optional[bool] = False, inner: Optional[str] = None, scale: Optional[str] = 'width', panel_size: Optional[Tuple[float, float]] = (8, 0.5), palette: Optional[List[str]] = None, left: Optional[float] = 0.15, bottom: Optional[float] = 0.15, wspace: Optional[float] = 0.1, ylabel: Optional[str] = None, return_fig: Optional[bool] = False, dpi: Optional[float] = 300.0, **kwargs, ) -> Union[plt.Figure, None]: """ Generate a stacked violin plot. Parameters ---------- data: ``AnnData`` or ``MultimodalData`` or ``UnimodalData`` object Single-cell expression data. attrs: ``str`` or ``List[str]`` Cell attributes or features to plot. Cell attributes must exist in ``data.obs`` and must be numeric. Features must exist in ``data.var``. groupby: ``str`` A categorical variable in data.obs that is used to categorize the cells, e.g. Clusters. hue: ``str``, optional, default: None 'hue' should be a categorical variable in data.obs that has only two levels. Set 'hue' will show us split violin plots. matkey: ``str``, optional, default: ``None`` If matkey is set, select matrix with matkey as keyword in the current modality. Only works for MultimodalData or UnimodalData objects. stripplot: ``bool``, optional, default: ``False`` Attach a stripplot to the violinplot or not. 
This option will be automatically turn off if 'hue' is set. inner: ``str``, optional, default: ``None`` Representation of the datapoints in the violin interior: - If ``box``, draw a miniature boxplot. - If ``quartiles``, draw the quartiles of the distribution. - If ``point`` or ``stick``, show each underlying datapoint. - If ``None``, will draw unadorned violins. scale: ``str``, optional, default: ``width`` The method used to scale the width of each violin: - If ``width``, each violin will have the same width. - If ``area``, each violin will have the same area. - If ``count``, the width of the violins will be scaled by the number of observations in that bin. panel_size: ``Tuple[float, float]``, optional, default: ``(8, 0.5)`` The size (width, height) in inches of each violin panel. palette: ``List[str]``, optional (default: ``None``) Used for setting colors for categories in ``groupby``. Within the list, each string is the color for one category. left: ``float``, optional, default: ``0.15`` This parameter sets the figure's left margin as a fraction of panel's width (left * panel_size[0]). bottom: ``float``, optional, default: ``0.15`` This parameter sets the figure's bottom margin as a fraction of panel's height (bottom * panel_size[1]). wspace: ``float``, optional, default: ``0.1`` This parameter sets the width between panels and also the figure's right margin as a fraction of panel's width (wspace * panel_size[0]). ylabel: ``str``, optional, default: ``None`` Y-axis label. No label to show if ``None``. return_fig: ``bool``, optional, default: ``False`` Return a ``Figure`` object if ``True``; return ``None`` otherwise. dpi: ``float``, optional, default: ``300.0`` The resolution in dots per inch. kwargs Are passed to ``seaborn.violinplot``. 
Returns ------- ``Figure`` object A ``matplotlib.figure.Figure`` object containing the dot plot if ``show == False`` Examples -------- >>> pg.violin(data, attrs=['CD14', 'TRAC', 'CD34'], groupby='louvain_labels') """ if not is_list_like(attrs): attrs = [attrs] if not isinstance(data, anndata.AnnData): cur_matkey = data.current_matrix() if matkey is not None: assert not isinstance(data, anndata.AnnData) data.select_matrix(matkey) nrows = len(attrs) fig, axes = _get_subplot_layouts(nrows=nrows, ncols=1, panel_size=panel_size, dpi=dpi, left=left, bottom=bottom, wspace=wspace, hspace=0, squeeze=False, sharey=False) obs_keys = [] genes = [] for key in attrs: if key in data.obs: assert is_numeric_dtype(data.obs[key]) obs_keys.append(key) else: if key not in data.var_names: logger.warning(f"Cannot find gene {key}. Please make sure all genes are included in data.var_names before running this function!") return None genes.append(key) df_list = [pd.DataFrame({"label": data.obs[groupby].values})] if hue is not None: df_list.append(pd.DataFrame({hue: data.obs[hue].values})) stripplot = False if len(obs_keys) > 0: df_list.append(data.obs[obs_keys].reset_index(drop=True)) if len(genes) > 0: expr_mat = slicing(data[:, genes].X) df_list.append(pd.DataFrame(data=expr_mat, columns=genes)) df = pd.concat(df_list, axis = 1) for i in range(nrows): ax = axes[i, 0] if stripplot: sns.stripplot(x="label", y=attrs[i], hue = hue, data=df, ax=ax, size=1, color="k", jitter=True) sns.violinplot(x="label", y=attrs[i], hue = hue, data=df, inner=inner, linewidth=1, ax=ax, cut=0, scale=scale, split=True, palette=palette, **kwargs) ax.grid(False) if hue is not None: if
<filename>multi-threading.py
#!/usr/bin/env python
# coding: utf-8

# # Dense 3D Face Correspondence
#
# Multi-threaded pipeline sketch: load/normalize face scans, sample seed points,
# triangulate, extract geodesic patches per triangle edge, detect keypoints in
# each patch, compute per-keypoint features, and match keypoints across faces.
#
# NOTE(review): this script relies on module-level globals that are created
# elsewhere (not visible here): face_points, grid_data, patches, keypoints,
# features, new_keypoints, file_paths, rho, Kq. Confirm they are initialized
# (e.g. as dict/defaultdict) before these functions run.

# In[1]:

import os
os.environ["MKL_NUM_THREADS"] = "12"
os.environ["NUMEXPR_NUM_THREADS"] = "12"
os.environ["OMP_NUM_THREADS"] = "12"

# In[2]:

import pdb
import numpy as np
from collections import defaultdict
import time, warnings
import re
import threading
import cv2
import ipyvolume as ipv
import scipy
from math import cos, sin
from scipy import meshgrid, interpolate
import pdb
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import ConvexHull, Delaunay
import numpy as np
from scipy.interpolate import griddata

warnings.filterwarnings("ignore")


# Read each face's data, normalize it, and compute its grid interpolation.
# Intended to be run once per face, one thread per face.
def get_data(file_path, var_name):
    """Load a VRML-like point file, center it on the nose tip, and store both
    the raw points and a regular-grid linear interpolation in module globals.

    file_path: path to the face scan file (lines with 3 floats are points).
    var_name:  key (e.g. "face1") under which results are stored in the
               globals `face_points` and `grid_data`.
    """
    # global points and grid data structures which will be modified by all threads
    # NOTE(review): each thread writes a distinct key, so concurrent dict writes
    # are safe under CPython's GIL — confirm no two threads share a var_name.
    global face_points
    global grid_data
    holder = []
    # read face data from path; keep only lines that parse as exactly 3 floats
    with open(file_path, "r") as vrml:
        for line in vrml:
            a = line.strip().strip(",").split()
            if len(a) == 3:
                try:
                    holder.append(list(map(float, a)))
                except:
                    # deliberate best-effort: non-numeric 3-token lines are skipped
                    pass
    x,y,z = zip(*holder)
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    holder = np.array(holder)
    # normalizing face: translate so the nose tip (max-z point) is the origin
    maxind = np.argmax(holder[:,2])
    nosex = holder[maxind,0]
    nosey = holder[maxind,1]
    nosez = holder[maxind,2]
    holder = holder - np.array([nosex, nosey, nosez])
    face_points[var_name] = holder
    # grid data extraction: 0.5-spaced regular grid over the xy bounding box,
    # z linearly interpolated from the scattered points (NaN outside the hull)
    x1, y1, z1 = map(np.array, zip(*holder))
    grid_x, grid_y = np.mgrid[np.amin(x1):np.amax(x1):0.5, np.amin(y1):np.amax(y1):0.5]
    grid_z = griddata((x1, y1), z1, (grid_x, grid_y), method='linear')
    grid_data[var_name] = [grid_x, grid_y, grid_z]


# ## Sparse Correspondence Initialization

# ## Seed points sampling using mean 2D convex hull

def hull72(points, nosex, nosey, nosez):
    """Sample 72 boundary seed points (every 5 degrees) plus the nose tip.

    For each angle, picks the data point closest (in xy) to a point on a
    radius-200 circle around the origin. Returns a list of 73 [x, y, z] seeds.
    NOTE(review): the 200 radius is hard-coded — assumes scans fit that scale.
    """
    newhull = [[nosex, nosey, nosez]]
    for theta in range(0, 360, 5):
        fx = 200 * cos(theta * np.pi / 180)
        fy = 200 * sin(theta * np.pi / 180)
        # nearest data point to the circle point, by squared xy distance
        nearest_point = min(zip(points[:, 0], points[:, 1], points[:, 2]),
                            key=lambda p: (p[0] - fx)**2 + (p[1] - fy)**2)
        newhull.append(nearest_point)
    return newhull


def get_hull(points):
    """Return the 73-point seed hull for `points` (nose tip + 72 boundary samples)."""
    maxind = np.argmax(points[:,2])
    # coordinates of nose, nosex = x coordinate of nose, similarly for nosey and nosez
    nosex = points[maxind,0]
    nosey = points[maxind,1]
    nosez = points[maxind,2]
    hull = np.array(hull72(points, nosex,nosey,nosez))
    return hull


# ## Delaunay Triangulation

def triangulation(hull):
    """Delaunay-triangulate the hull points projected onto the xy plane."""
    points2D = np.vstack([hull[:,0],hull[:,1]]).T
    tri_hull = Delaunay(points2D)
    return tri_hull


# ## Geodesic Patch Extraction

def get_all_patches_for_face(face_index, hull, triangles):
    """For one face, collect the patch of points around every shared triangle
    edge and append it to the global `patches["edge<i>"]` lists.

    A point belongs to an edge's patch if it lies inside the sphere whose
    diameter is the edge AND within `5 * rho` of the edge's 2D line.
    """
    from itertools import combinations
    points = face_points["face"+str(face_index)]
    patch_width = 5 * rho

    # perpendicular distance of (x, y) from the 2D line through (x1,y1)-(x2,y2)
    # NOTE(review): z is unused, and a vertical edge (x2 == x1) divides by zero.
    def distance(x,y,z,x1,y1,z1,x2,y2,z2):
        a = (y2-y1)/(x2-x1)
        b = -1
        c = y2-x2*(y2-y1)/(x2-x1)
        return abs(a*x+b*y+c)/(a**2+b**2)**0.5

    all_patches = []
    for t1,t2 in combinations(triangles,r=2):  # pairwise triangles
        if len(set(t1)&set(t2))==2:  # triangles with a common edge
            patch_list = []
            a_ind, b_ind = list(set(t1)&set(t2))
            x1, y1, z1 = hull[a_ind,:]
            x2, y2, z2 = hull[b_ind,:]
            for x,y,z in points:  # loop over all points to find patch points
                # first test: inside the circle with the edge as diameter
                if (x-x1/2-x2/2)**2+(y-y1/2-y2/2)**2<(x1/2-x2/2)**2+(y1/2-y2/2)**2 and distance(x,y,z,x1,y1,z1,x2,y2,z2)<patch_width:
                    patch_list.append([x,y,z])
            if len(patch_list)==0:
                #print("ALERT: NO PATCH FOR AN EDGE!!!!")
                pass
            all_patches.append(np.array(patch_list))
    # NOTE(review): edge keys here start at "edge0"; the thread spawners below
    # iterate edge indices from 1 — confirm "edge0" is intentionally unprocessed.
    global patches
    for edge_index in range(len(all_patches)):
        patches["edge" + str(edge_index)].append(all_patches[edge_index])


def update_patches(hull, triangles):
    """Extract patches for every face concurrently (one thread per face)."""
    threads = []
    for face_index in range(1, len(file_paths)+1):
        thread = threading.Thread(target=get_all_patches_for_face, args=(face_index, hull, triangles))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()


# takes in a point and the patch it belongs to and decides whether it is a keypoint
# (ratio of largest two eigenvalues on the covariance matrix of its local surface) or not
def is_keypoint(point, points):
    """Return True if `point` is a keypoint of its patch `points`.

    Looks at the neighborhood within `7 * rho`, min-max normalizes it, and
    thresholds the ratio of the two largest covariance eigenvalues at 30.
    """
    threshold = 7 * rho
    nhood = points[(np.sum(np.square(points-point),axis=1)) < threshold**2]
    try:
        # min-max normalize each coordinate of the neighborhood to [0, 1]
        nhood = (nhood - np.min(nhood, axis=0)) / (np.max(nhood, axis=0) - np.min(nhood, axis=0))
        # NOTE(review): np.cov defaults to rowvar=True, so this is an N x N
        # covariance over points, not 3 x 3 over coordinates — confirm intent.
        covmat = np.cov(nhood)
        eigvals = np.sort(np.abs(np.linalg.eigvalsh(covmat)))
        ratio = eigvals[-1]/(eigvals[-2]+0.0001)
        return ratio>30 #eigen_ratio_threshold #/ 5
    except Exception as e:
        # degenerate neighborhoods (empty, constant column, < 2 points) → not a keypoint
        return False


def get_keypoints_from_patch(edge_index):
    """Detect keypoints in every face's patch for one edge and store the list
    (one entry per face) in the global `keypoints["edge<i>"]`."""
    global keypoints
    edge_patches = patches["edge" + str(edge_index)]
    edge_keypoints = []
    for patch in edge_patches:
        #print(patch.shape)
        if patch.shape[0]:
            patch_keypoints = patch[np.apply_along_axis(is_keypoint, 1, patch, patch)] # keypoints in `patch`
        else:
            patch_keypoints = []
        edge_keypoints.append(patch_keypoints)
    keypoints["edge" + str(edge_index)] = edge_keypoints


def update_keypoints(patches):
    """Run keypoint detection for each edge concurrently (one thread per edge).

    NOTE(review): iterates 1..len(patches) while patch keys start at "edge0";
    confirm the off-by-one is intended (see get_all_patches_for_face).
    """
    threads = []
    for edge_index in range(1, len(patches)+1):
        thread = threading.Thread(target=get_keypoints_from_patch, args=(edge_index,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()


def get_normal(x, y, grid_x, grid_y, grid_z):
    """Estimate the unit surface normal at (x, y) from the interpolated grid.

    Uses the central-difference cross pattern::

            3
          1   2
            4

    Returns the string "None" (not the None object) when (x, y) maps outside
    the grid, so callers can test with ``== "None"`` without triggering
    NumPy's elementwise comparison on an array result.
    """
    # convert world coordinates to (rounded) grid indices
    i = (x - grid_x[0, 0]) / (grid_x[1, 0] - grid_x[0, 0])
    j = (y - grid_y[0, 0]) / (grid_y[0, 1] - grid_y[0, 0])
    i,j = int(round(i)), int(round(j))
    if (not 0 <= i < grid_x.shape[0]-1) or (not 0 <= j < grid_y.shape[1]-1):
        warnings.warn("out of bounds error")
        #pdb.set_trace()
        return "None"
    point1 = (grid_x[i-1, j], grid_y[i-1, j], grid_z[i-1, j])
    point2 = (grid_x[i+1, j], grid_y[i+1, j], grid_z[i+1, j])
    point3 = (grid_x[i, j-1], grid_y[i, j-1], grid_z[i, j-1])
    point4 = (grid_x[i, j+1], grid_y[i, j+1], grid_z[i, j+1])
    # tangent vectors along the two grid axes
    a1, a2, a3 = [point2[x] - point1[x] for x in range(3)]
    b1, b2, b3 = [point3[x] - point4[x] for x in range(3)]
    # NOTE(review): a true cross product would be
    # (a2*b3 - a3*b2, a3*b1 - a1*b3, a1*b2 - a2*b1); confirm this variant.
    normal = np.array([a3*b2, a1*b3, -a1*b2])
    return normal/np.linalg.norm(normal)


def get_keypoint_features(keypoints, face_index):
    """Build a feature vector for each keypoint of one face.

    Features per keypoint: [x, y, z] location, 3-vector surface normal, and
    Hu moments of the xy/yz/xz projections of the `5 * rho` neighborhood.
    Keypoints whose normal cannot be computed (off-grid) are dropped.

    Returns (final_keypoints ndarray, list of per-keypoint feature arrays).
    """
    feature_list = [] # a list to store extracted features of each keypoint
    final_keypoints = [] # remove unwanted keypoints, like the ones on edges etc
    for point in keypoints:
        point_features = []
        x, y, z = point
        points = face_points["face" + str(face_index)]
        grid_x, grid_y, grid_z = grid_data["face" + str(face_index)]
        threshold = 5 * rho
        nhood = points[(np.sum(np.square(points-point), axis=1)) < threshold**2]
        # Hu moments of the neighborhood projected on each coordinate plane
        xy_hu_moments = cv2.HuMoments(cv2.moments(nhood[:, :2])).flatten()
        yz_hu_moments = cv2.HuMoments(cv2.moments(nhood[:, 1:])).flatten()
        xz_hu_moments = cv2.HuMoments(cv2.moments(nhood[:, ::2])).flatten()
        hu_moments = np.concatenate([xy_hu_moments, yz_hu_moments, xz_hu_moments])
        normal = get_normal(x, y, grid_x, grid_y, grid_z)
        if normal == "None": # array comparison raises ambiguity error, so None passed as string
            continue
        final_keypoints.append(point)
        point_features.extend(np.array([x, y, z])) # spatial location
        point_features.extend(normal)
        point_features.extend(hu_moments)
        point_features = np.array(point_features)
        feature_list.append(point_features)
    final_keypoints = np.array(final_keypoints)
    return final_keypoints, feature_list


def get_features(edge_index):
    """Compute keypoint features for one edge across all faces; store in the
    global `features["edge<i>"]` and prune unusable keypoints in place."""
    global features, keypoints
    edgewise_keypoint_features = [] # store features of keypoints for a given edge_index across all faces
    for face_index in range(1, len(file_paths)+1):
        try:
            edge_keypoints = keypoints["edge" + str(edge_index)][face_index-1]
            final_keypoints, keypoint_features = get_keypoint_features(edge_keypoints, face_index)
            keypoints["edge" + str(edge_index)][face_index-1] = final_keypoints # update the keypoint, remove unwanted keypoints like those on the edge etc
        except: # for no keypoints, no features
            # NOTE(review): bare except also hides real errors (KeyError, cv2
            # failures) — consider narrowing when this module is revisited.
            keypoint_features = []
        edgewise_keypoint_features.append(keypoint_features)
    features["edge" + str(edge_index)] = edgewise_keypoint_features


def update_features(keypoints):
    """Compute features for each edge concurrently (one thread per edge)."""
    threads = []
    for edge_index in range(1, len(keypoints)+1):
        thread = threading.Thread(target=get_features, args=(edge_index, ))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()


def get_keypoint_under_2rho(keypoints, point):
    """return the index of the keypoint in `keypoints` which is closest to `point`
    if that distance is small enough, else return None

    NOTE(review): the name/docstring say 2 * rho but the code tests 3 * rho —
    confirm which radius is intended.
    """
    try:
        distance = np.sqrt(np.sum(np.square(keypoints-point), axis=1))
        if (distance < 3*rho).any():
            min_dist_index = np.argmin(distance)
            return min_dist_index
    except Exception as e: # keypoints is [], gotta return None
        pass
    return None


def get_matching_keypoints(edge_keypoints, edge_features, edge_index):
    """Find keypoints that correspond across all faces for one edge.

    A match requires (a) a spatially-close keypoint in every face's patch and
    (b) pairwise feature distances under the global threshold `Kq`. Returns
    the mean coordinates of each matched keypoint group (possibly empty).
    """
    # check if a bunch of keypoints across the patches (across all faces) are
    # within the spatial radius and their euclidean feature distance < Kq
    # first get all the keypoints in a list
    matching_keypoints_list = []
    for face_index1 in range(len(edge_keypoints)): # take a patch along the edge among the faces
        for point_index, point in enumerate(edge_keypoints[face_index1]): # take a keypoint in that patch, we have to find corresponding keypoints in each other patch along this edge
            matched_keypoint_indices = [] # to store indices of matched keypoints across the patches
            for face_index2 in range(len(edge_keypoints)): # find if matching keypoints exist across the patches along that edge across all faces
                if face_index2 == face_index1:
                    matched_keypoint_indices.append(point_index)
                    continue
                matched_keypoint = get_keypoint_under_2rho(edge_keypoints[face_index2], point)
                # NOTE(review): truthiness test treats a valid match at index 0
                # as "no match"; `is not None` is likely what was meant.
                if matched_keypoint:
                    #if edge_index == 36: pdb.set_trace()I#
                    matched_keypoint_indices.append(matched_keypoint)
                else: # no keypoint was matched in the above patch (face_index2), gotta start search on other keypoint from face_index1
                    break
            if len(matched_keypoint_indices) == len(edge_keypoints): # there's a corresponding keypoint for each patch across all faces
                matching_keypoints_list.append(matched_keypoint_indices)
    if len(matching_keypoints_list) == 0:
        return []
    # now we have those keypoints which are in spatial vicinity; compute
    # euclidean distance of their feature vectors
    final_matched_keypoints = []
    for matched_keypoints in matching_keypoints_list: # select first list of matching keypoints
        # get the indices, get their corresponding features, compute euclidean distance
        try:
            features = np.array([edge_features[face_index][idx] for face_index, idx in zip(range(len(edge_features)), matched_keypoints)])
            euc_dist_under_kq = lambda feature, features: np.sqrt(np.sum(np.square(features - feature), axis=1)) < Kq
            if np.apply_along_axis(euc_dist_under_kq, 1, features, features).all() == True:
                # we have got a set of matching keypoints, get their mean coordinates
                matched_coords = [edge_keypoints[face_index][idx] for face_index, idx in zip(range(len(edge_features)), matched_keypoints)]
                final_matched_keypoints.append(np.mean(matched_coords, axis=0))
        except:
            # NOTE(review): debugger left in the exception path — remove for
            # non-interactive runs.
            pdb.set_trace()
    return final_matched_keypoints


def keypoint_matching_thread(edge_index):
    """Match keypoints for one edge and append results to global `new_keypoints`.

    NOTE(review): `edge_keypoints`/`edge_features` are declared global here but
    each thread overwrites them — a data race if threads truly run concurrently;
    confirm they are not read after another thread's write.
    """
    global new_keypoints, edge_keypoints, edge_features
    edge_keypoints = keypoints["edge" + str(edge_index)]
    edge_features = features["edge" + str(edge_index)]
    matched_keypoints = get_matching_keypoints(edge_keypoints, edge_features, edge_index)
    if len(matched_keypoints):
        new_keypoints.extend(matched_keypoints)


# those keypoints which are in vicinity of 2*rho are considered for matching
# matching is done using constrained nearest neighbour
# choose an edge, select a keypoint, find out keypoints
# coding: utf-8
"""Tests for lightgbm.dask module"""

import socket
from itertools import groupby
from os import getenv
from sys import platform

import lightgbm as lgb
import pytest

# lightgbm.dask only supports Linux; skip the whole module elsewhere
if not platform.startswith('linux'):
    pytest.skip('lightgbm.dask is currently supported in Linux environments', allow_module_level=True)
if not lgb.compat.DASK_INSTALLED:
    pytest.skip('Dask is not installed', allow_module_level=True)

import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from dask.array.utils import assert_eq
from distributed.utils_test import client, cluster_fixture, gen_cluster, loop
from scipy.sparse import csr_matrix
from sklearn.datasets import make_blobs, make_regression
from sklearn.utils import check_random_state

from .utils import make_ranking

# time, in seconds, to wait for the Dask client to close. Used to avoid teardown errors
# see https://distributed.dask.org/en/latest/api.html#distributed.Client.close
CLIENT_CLOSE_TIMEOUT = 120

# parametrization grids shared by the tests below
data_output = ['array', 'scipy_csr_matrix', 'dataframe']
data_centers = [[[-4, -4], [4, 4]], [[-4, -4], [4, 4], [-4, 4]]]
group_sizes = [5, 5, 5, 10, 10, 10, 20, 20, 20, 50, 50]

pytestmark = [
    pytest.mark.skipif(getenv('TASK', '') == 'mpi', reason='Fails to run with MPI interface'),
    pytest.mark.skipif(getenv('TASK', '') == 'gpu', reason='Fails to run with GPU interface')
]


@pytest.fixture()
def listen_port():
    # hand each test a fresh port by bumping a counter stored on the function
    # object itself; NOTE(review): not safe under parallel test execution.
    listen_port.port += 10
    return listen_port.port


listen_port.port = 13000


def _create_ranking_data(n_samples=100, output='array', chunk_size=50, **kwargs):
    """Build a learning-to-rank dataset and its Dask counterpart.

    Returns (X, y, w, g_rle, dX, dy, dw, dg) where g_rle is the run-length
    encoded group sizes and the d* values are Dask collections partitioned so
    that no query group is split across partitions.
    """
    X, y, g = make_ranking(n_samples=n_samples, random_state=42, **kwargs)
    rnd = np.random.RandomState(42)
    w = rnd.rand(X.shape[0]) * 0.01
    g_rle = np.array([len(list(grp)) for _, grp in groupby(g)])

    if output == 'dataframe':
        # add target, weight, and group to DataFrame so that partitions abide by group boundaries.
        X_df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(X.shape[1])])
        X = X_df.copy()
        X_df = X_df.assign(y=y, g=g, w=w)

        # set_index ensures partitions are based on group id.
        # See https://stackoverflow.com/questions/49532824/dask-dataframe-split-partitions-based-on-a-column-or-function.
        X_df.set_index('g', inplace=True)
        dX = dd.from_pandas(X_df, chunksize=chunk_size)

        # separate target, weight from features.
        dy = dX['y']
        dw = dX['w']
        dX = dX.drop(columns=['y', 'w'])
        dg = dX.index.to_series()

        # encode group identifiers into run-length encoding, the format LightGBMRanker is expecting
        # so that within each partition, sum(g) = n_samples.
        dg = dg.map_partitions(lambda p: p.groupby('g', sort=False).apply(lambda z: z.shape[0]))
    elif output == 'array':
        # ranking arrays: one chunk per group. Each chunk must include all columns.
        p = X.shape[1]
        dX, dy, dw, dg = [], [], [], []
        for g_idx, rhs in enumerate(np.cumsum(g_rle)):
            lhs = rhs - g_rle[g_idx]
            dX.append(da.from_array(X[lhs:rhs, :], chunks=(rhs - lhs, p)))
            dy.append(da.from_array(y[lhs:rhs]))
            dw.append(da.from_array(w[lhs:rhs]))
            dg.append(da.from_array(np.array([g_rle[g_idx]])))

        dX = da.concatenate(dX, axis=0)
        dy = da.concatenate(dy, axis=0)
        dw = da.concatenate(dw, axis=0)
        dg = da.concatenate(dg, axis=0)
    else:
        raise ValueError('Ranking data creation only supported for Dask arrays and dataframes')

    return X, y, w, g_rle, dX, dy, dw, dg


def _create_data(objective, n_samples=100, centers=2, output='array', chunk_size=50):
    """Build a classification or regression dataset plus its Dask counterpart.

    Returns (X, y, weights, dX, dy, dw) with d* in the requested `output`
    flavor: dask array, dask dataframe, or dask array of scipy CSR blocks.
    """
    if objective == 'classification':
        X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42)
    elif objective == 'regression':
        X, y = make_regression(n_samples=n_samples, random_state=42)
    else:
        raise ValueError("Unknown objective '%s'" % objective)
    rnd = np.random.RandomState(42)
    weights = rnd.random(X.shape[0]) * 0.01

    if output == 'array':
        dX = da.from_array(X, (chunk_size, X.shape[1]))
        dy = da.from_array(y, chunk_size)
        dw = da.from_array(weights, chunk_size)
    elif output == 'dataframe':
        X_df = pd.DataFrame(X, columns=['feature_%d' % i for i in range(X.shape[1])])
        y_df = pd.Series(y, name='target')
        dX = dd.from_pandas(X_df, chunksize=chunk_size)
        dy = dd.from_pandas(y_df, chunksize=chunk_size)
        dw = dd.from_array(weights, chunksize=chunk_size)
    elif output == 'scipy_csr_matrix':
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csr_matrix)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(weights, chunk_size)
    else:
        raise ValueError("Unknown output type '%s'" % output)

    return X, y, weights, dX, dy, dw


def _r2_score(dy_true, dy_pred):
    # R^2 computed lazily on Dask collections, then materialized
    numerator = ((dy_true - dy_pred) ** 2).sum(axis=0, dtype=np.float64)
    denominator = ((dy_true - dy_pred.mean(axis=0)) ** 2).sum(axis=0, dtype=np.float64)
    return (1 - numerator / denominator).compute()


def _accuracy_score(dy_true, dy_pred):
    # fraction of exact label matches, computed on Dask collections
    return da.average(dy_true == dy_pred).compute()


@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier(output, centers, client, listen_port):
    # distributed classifier should match a locally-trained one
    X, y, w, dX, dy, dw = _create_data(
        objective='classification',
        output=output,
        centers=centers
    )

    params = {
        "n_estimators": 10,
        "num_leaves": 10
    }
    dask_classifier = lgb.DaskLGBMClassifier(
        time_out=5,
        local_listen_port=listen_port,
        **params
    )
    dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw, client=client)
    p1 = dask_classifier.predict(dX)
    p1_proba = dask_classifier.predict_proba(dX).compute()
    p1_local = dask_classifier.to_local().predict(X)
    s1 = _accuracy_score(dy, p1)
    p1 = p1.compute()

    local_classifier = lgb.LGBMClassifier(**params)
    local_classifier.fit(X, y, sample_weight=w)
    p2 = local_classifier.predict(X)
    p2_proba = local_classifier.predict_proba(X)
    s2 = local_classifier.score(X, y)

    assert_eq(s1, s2)
    assert_eq(p1, p2)
    assert_eq(y, p1)
    assert_eq(y, p2)
    assert_eq(p1_proba, p2_proba, atol=0.3)
    assert_eq(p1_local, p2)
    assert_eq(y, p1_local)

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('centers', data_centers)
def test_classifier_pred_contrib(output, centers, client, listen_port):
    # pred_contrib output (SHAP-style) should have the expected shape
    X, y, w, dX, dy, dw = _create_data(
        objective='classification',
        output=output,
        centers=centers
    )

    params = {
        "n_estimators": 10,
        "num_leaves": 10
    }
    dask_classifier = lgb.DaskLGBMClassifier(
        time_out=5,
        local_listen_port=listen_port,
        tree_learner='data',
        **params
    )
    dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw, client=client)
    preds_with_contrib = dask_classifier.predict(dX, pred_contrib=True).compute()

    local_classifier = lgb.LGBMClassifier(**params)
    local_classifier.fit(X, y, sample_weight=w)
    local_preds_with_contrib = local_classifier.predict(X, pred_contrib=True)

    if output == 'scipy_csr_matrix':
        preds_with_contrib = np.array(preds_with_contrib.todense())

    # shape depends on whether it is binary or multiclass classification
    num_features = dask_classifier.n_features_
    num_classes = dask_classifier.n_classes_
    if num_classes == 2:
        expected_num_cols = num_features + 1
    else:
        expected_num_cols = (num_features + 1) * num_classes

    # * shape depends on whether it is binary or multiclass classification
    # * matrix for binary classification is of the form [feature_contrib, base_value],
    #   for multi-class it's [feat_contrib_class1, base_value_class1, feat_contrib_class2, base_value_class2, etc.]
    # * contrib outputs for distributed training are different than from local training, so we can just test
    #   that the output has the right shape and base values are in the right position
    assert preds_with_contrib.shape[1] == expected_num_cols
    assert preds_with_contrib.shape == local_preds_with_contrib.shape

    if num_classes == 2:
        assert len(np.unique(preds_with_contrib[:, num_features]) == 1)
    else:
        for i in range(num_classes):
            base_value_col = num_features * (i + 1) + i
            assert len(np.unique(preds_with_contrib[:, base_value_col]) == 1)

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


def test_training_does_not_fail_on_port_conflicts(client):
    # occupy a port, then train repeatedly on the same base port: the dask
    # interface should fall forward to a free port rather than crash
    _, _, _, dX, dy, dw = _create_data('classification', output='array')

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('127.0.0.1', 12400))

        dask_classifier = lgb.DaskLGBMClassifier(
            time_out=5,
            local_listen_port=12400,
            n_estimators=5,
            num_leaves=5
        )
        for _ in range(5):
            dask_classifier.fit(
                X=dX,
                y=dy,
                sample_weight=dw,
                client=client
            )
            assert dask_classifier.booster_

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


@pytest.mark.parametrize('output', data_output)
def test_regressor(output, client, listen_port):
    # distributed regressor should roughly match a locally-trained one
    X, y, w, dX, dy, dw = _create_data(
        objective='regression',
        output=output
    )

    params = {
        "random_state": 42,
        "num_leaves": 10
    }
    dask_regressor = lgb.DaskLGBMRegressor(
        time_out=5,
        local_listen_port=listen_port,
        tree='data',
        **params
    )
    dask_regressor = dask_regressor.fit(dX, dy, client=client, sample_weight=dw)
    p1 = dask_regressor.predict(dX)
    if output != 'dataframe':
        s1 = _r2_score(dy, p1)
    p1 = p1.compute()
    p1_local = dask_regressor.to_local().predict(X)
    s1_local = dask_regressor.to_local().score(X, y)

    local_regressor = lgb.LGBMRegressor(**params)
    local_regressor.fit(X, y, sample_weight=w)
    s2 = local_regressor.score(X, y)
    p2 = local_regressor.predict(X)

    # Scores should be the same
    if output != 'dataframe':
        assert_eq(s1, s2, atol=.01)
        assert_eq(s1, s1_local, atol=.003)

    # Predictions should be roughly the same
    assert_eq(y, p1, rtol=1., atol=100.)
    assert_eq(y, p2, rtol=1., atol=50.)
    assert_eq(p1, p1_local)

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


@pytest.mark.parametrize('output', data_output)
def test_regressor_pred_contrib(output, client, listen_port):
    X, y, w, dX, dy, dw = _create_data(
        objective='regression',
        output=output
    )

    params = {
        "n_estimators": 10,
        "num_leaves": 10
    }
    dask_regressor = lgb.DaskLGBMRegressor(
        time_out=5,
        local_listen_port=listen_port,
        tree_learner='data',
        **params
    )
    dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw, client=client)
    preds_with_contrib = dask_regressor.predict(dX, pred_contrib=True).compute()

    local_regressor = lgb.LGBMRegressor(**params)
    local_regressor.fit(X, y, sample_weight=w)
    local_preds_with_contrib = local_regressor.predict(X, pred_contrib=True)

    if output == "scipy_csr_matrix":
        preds_with_contrib = np.array(preds_with_contrib.todense())

    # contrib outputs for distributed training are different than from local training, so we can just test
    # that the output has the right shape and base values are in the right position
    num_features = dX.shape[1]
    assert preds_with_contrib.shape[1] == num_features + 1
    assert preds_with_contrib.shape == local_preds_with_contrib.shape

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('alpha', [.1, .5, .9])
def test_regressor_quantile(output, client, listen_port, alpha):
    # quantile objective: the empirical coverage should be close to alpha
    X, y, w, dX, dy, dw = _create_data(
        objective='regression',
        output=output
    )

    params = {
        "objective": "quantile",
        "alpha": alpha,
        "random_state": 42,
        "n_estimators": 10,
        "num_leaves": 10
    }
    dask_regressor = lgb.DaskLGBMRegressor(
        local_listen_port=listen_port,
        tree_learner_type='data_parallel',
        **params
    )
    dask_regressor = dask_regressor.fit(dX, dy, client=client, sample_weight=dw)
    p1 = dask_regressor.predict(dX).compute()
    q1 = np.count_nonzero(y < p1) / y.shape[0]

    local_regressor = lgb.LGBMRegressor(**params)
    local_regressor.fit(X, y, sample_weight=w)
    p2 = local_regressor.predict(X)
    q2 = np.count_nonzero(y < p2) / y.shape[0]

    # Quantiles should be right
    np.testing.assert_allclose(q1, alpha, atol=0.2)
    np.testing.assert_allclose(q2, alpha, atol=0.2)

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


@pytest.mark.parametrize('output', ['array', 'dataframe'])
@pytest.mark.parametrize('group', [None, group_sizes])
def test_ranker(output, client, listen_port, group):
    X, y, w, g, dX, dy, dw, dg = _create_ranking_data(
        output=output,
        group=group
    )

    # use many trees + leaves to overfit, help ensure that dask data-parallel strategy matches that of
    # serial learner. See https://github.com/microsoft/LightGBM/issues/3292#issuecomment-671288210.
    params = {
        "random_state": 42,
        "n_estimators": 50,
        "num_leaves": 20,
        "min_child_samples": 1
    }
    dask_ranker = lgb.DaskLGBMRanker(
        time_out=5,
        local_listen_port=listen_port,
        tree_learner_type='data_parallel',
        **params
    )
    dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg, client=client)
    rnkvec_dask = dask_ranker.predict(dX)
    rnkvec_dask = rnkvec_dask.compute()
    rnkvec_dask_local = dask_ranker.to_local().predict(X)

    local_ranker = lgb.LGBMRanker(**params)
    local_ranker.fit(X, y, sample_weight=w, group=g)
    rnkvec_local = local_ranker.predict(X)

    # distributed ranker should be able to rank decently well and should
    # have high rank correlation with scores from serial ranker.
    dcor = spearmanr(rnkvec_dask, y).correlation
    assert dcor > 0.6
    assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.75
    assert_eq(rnkvec_dask, rnkvec_dask_local)

    client.close(timeout=CLIENT_CLOSE_TIMEOUT)


def test_find_open_port_works():
    # when the requested port is taken, _find_open_port should pick the next one
    worker_ip = '127.0.0.1'
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((worker_ip, 12400))
        new_port = lgb.dask._find_open_port(
            worker_ip=worker_ip,
            local_listen_port=12400,
            ports_to_skip=set()
        )
        assert new_port == 12401

    with
<reponame>KurmasanaWT/community<filename>codes/correl.py
# Dash page: rolling correlation / beta analysis of an asset vs a benchmark,
# built on yfinance data and plotly figures.
from dash import dcc, html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State

import numpy as np
import pandas as pd

import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots

import yfinance as yf
import math

from sklearn.linear_model import LinearRegression

from app import app

np.seterr(divide='ignore')
pd.options.display.float_format = '{:,.2f}'.format

# Format and configure charts: custom plotly template with a faint watermark
pio.templates["draft"] = go.layout.Template(
    layout=go.Layout(
        title_x = 0.0,
        title_pad = dict(l=10, t=10),
        margin = dict(l=50,t=50, b=50, r=50, pad=0, autoexpand=True),
        font = dict(family="Arial", size=10),
        autosize=True,
    ),
    layout_annotations=[
        dict(
            name="draft watermark",
            text="KWT-Community",
            textangle=-30,
            opacity=0.03,
            font=dict(family="Arial", color="black", size=80),
            xref="paper",
            yref="paper",
            x=0.5,
            y=0.5,
            showarrow=False,
        )
    ]
)
pio.templates.default = "seaborn+draft"

# resolution used when exporting a chart as an image
plotres:dict = dict(width=1920, height=1080)

config1 = {
    "displaylogo": False,
    "toImageButtonOptions": plotres,
    "modeBarButtonsToAdd": [
        "drawline",
        "drawopenpath",
        "drawclosedpath",
        "drawcircle",
        "drawrect",
        "eraseshape",
        "hoverClosestCartesian",
        "hoverCompareCartesian"
    ]
}

# Inputs for the dropdown menus
tickers = pd.read_csv('db/tickers.csv', delimiter=';')  # assets listed on the Brazilian exchange
tickers['label'] = tickers['value']+" - "+tickers['label']
tickers['value'] = tickers['value']+".SA"  # Yahoo Finance suffix for B3 tickers

other = pd.read_csv('db/other.csv', delimiter=';')  # other assets and indices
other['label'] = other['value']+" - "+other['label']

tickers=pd.concat([tickers,other])
tickers = tickers.to_dict('records')

periods = pd.read_csv('db/periods.csv', delimiter=';').to_dict('records')  # analysis periods
intervals = pd.read_csv('db/intervals.csv', delimiter=';').to_dict('records')  # sampling intervals within the period


def market_beta(X,Y,N):
    """
    X = The independent variable which is the Market
    Y = The dependent variable which is the Stock
    N = The length of the Window
    It returns the alphas and the betas of the rolling regression
    """
    # all the observations
    obs = len(X)
    # initiate the betas with null values
    betas = np.full(obs, np.nan)
    # initiate the alphas with null values
    alphas = np.full(obs, np.nan)
    # NOTE(review): window is N+1 observations (i .. i+N inclusive); the first
    # N entries of alphas/betas stay NaN by design.
    for i in range((obs-N)):
        regressor = LinearRegression()
        regressor.fit(X.to_numpy()[i : i + N+1].reshape(-1,1), Y.to_numpy()[i : i + N+1])
        betas[i+N] = regressor.coef_[0]
        alphas[i+N] = regressor.intercept_
    return(alphas, betas)


# LAYOUT: loading overlay + dropdown controls + four correlation graphs
layout = dbc.Container(
    children=[
        dcc.Loading(
            #className="kwtload",
            id="load_o1",
            color='#0a0',
            style={'background-color':'rgba(0, 0, 0, 0.5)'},
            parent_style={},
            fullscreen=True,
            children=html.Span(id="correl_load_o1", children=["LOADING..."]),
            type="default",
        ),
        dbc.Row([
            html.Div(className='kwtdrops', children=[
                html.H5("ATIVO"),
                dcc.Dropdown( id="ticker", options=tickers, value='VALE3.SA', clearable=False, style={'width':'300px'} ),
                html.H5("BENCHMARK"),
                dcc.Dropdown( id="indexer", options=tickers, value='^BVSP', clearable=False, style={'width':'300px'} ),
                html.H5("PERÍODO"),
                dcc.Dropdown( id="periods", options=periods, value='1y', clearable=False, style={'width':'10rem'} ),
                html.H5("INTERVALO"),
                dcc.Dropdown( id="intervals", options=intervals, value='1d', clearable=False, style={'width':'10rem'} ),
                dbc.Button(className="kwtchartbtn",id='submitb', n_clicks=0, children='Atualizar')
            ]),
        ]),
        html.Br(),
        dbc.Row([
            dcc.Graph(id="correl_graph", config=config1),
            dcc.Graph(id="correl_graph1", config=config1),
            dcc.Graph(id="correl_graph2", config=config1),
            dcc.Graph(id="correl_graph3", config=config1),
        ]),
    ],
fluid=True)


def get():
    # page factory used by the router
    return html.Div(layout)


####### CALLBACKS #######

####### MARKET PANEL CALLBACK #
@app.callback(
    [
        Output("correl_graph", "figure"),
        Output("correl_graph1", "figure"),
        Output("correl_graph2", "figure"),
        Output("correl_graph3", "figure"),
        Output("correl_load_o1", "children")
    ],
    [ Input('submitb', 'n_clicks') ],
    [
        State("ticker", "value"),
        State("indexer", "value"),
        State("periods", "value"),
        State("intervals", "value")
    ],
)
###### MARKET PANEL CALLBACK FUNCTION #
def display(sutb, tkr, idx, prd, itv):
    """Download asset and benchmark history and build the correlation figures.

    sutb: click count (unused beyond triggering); tkr/idx: asset/benchmark
    symbols; prd: period (e.g. '1y'); itv: bar interval (e.g. '1d').
    """
    # moving-average window lengths, in bars
    per21dd=21
    per50dd=50
    per200dd=200

    # download price and volume for the analyzed asset
    df = yf.download(tkr, interval=itv, period=prd)
    df = pd.DataFrame(df)
    df = df[df.index.dayofweek < 5]  # keep weekdays only
    df.dropna(inplace=True)
    #df.fillna( method ='ffill', inplace = True)
    #df.fillna( method ='bfill', inplace = True)

    ### change and return
    #df['Return'] = (np.log(df.Close / df.Close.shift(1)))*100
    df['Return'] = df.Close.pct_change()
    # NOTE(review): debugging prints left in a production callback
    print(df['Return'].isnull().sum())
    df.dropna(inplace=True)
    print(df['Return'].isnull().sum())
    df['PrevClose']=df.Close.shift(1)
    df['VarClose']=((df.Close - df.Close.shift(1))/df.Close.shift(1))*100
    #df['VarClose'] = df.Close.pct_change()
    # log returns (in %) over 21/50/200 bars
    df['Return21dd'] = (np.log(df.Close / df.Close.shift(per21dd)))*100
    df['Return50dd'] = (np.log(df.Close / df.Close.shift(per50dd)))*100
    df['Return200dd'] = (np.log(df.Close / df.Close.shift(per200dd)))*100
    df['VarAcum'] = ((df.Close/df.Close.iloc[0])-1)*100  # cumulative simple return (%)
    df['RetAcum'] = (np.log(df.Close / df.Close.iloc[0]))*100  # cumulative log return (%)
    df["RetAcumColor"] = np.where(df.RetAcum < 0, 'red', 'green')

    ### mean reversion vs the 21-day simple moving average
    df['CSMA21dd']=df.Close.rolling(per21dd).mean()
    df['RSMA21dd']=((df.Close/df['CSMA21dd'])-1)*100
    df["RSMA21dd_Color"] = np.where(df.RSMA21dd < 0, 'red', 'green')

    ### mean reversion vs the 50-day simple moving average
    df['CSMA50dd']=df.Close.rolling(per50dd).mean()
    df['RSMA50dd']=((df.Close/df['CSMA50dd'])-1)*100
    df["RSMA50dd_Color"] = np.where(df.RSMA50dd < 0, 'red', 'green')

    ### mean reversion vs the 200-day exponential moving average
    df['CEMA200dd']=df.Close.ewm(span=per200dd, min_periods=per200dd, adjust=True).mean()
    df['REMA200dd']=((df.Close/df['CEMA200dd'])-1)*100
    df["REMA200dd_Color"] = np.where(df.REMA200dd < 0, 'red', 'green')

    #print(df['Return'].isnull().sum())
    #df.Return

    # download price and volume for the reference asset (benchmark)
    dfi = yf.download(idx, interval=itv, period=prd)
    dfi = pd.DataFrame(dfi)
    dfi = dfi[dfi.index.dayofweek < 5]
    dfi.dropna(inplace=True)
    #dfi.fillna( method ='ffill', inplace = True)
    #dfi.fillna( method ='bfill', inplace = True)

    ### change and return (same derived columns as the asset frame above)
    #dfi['Return'] = (np.log(dfi.Close / dfi.Close.shift(1)))*100
    dfi['Return'] = dfi.Close.pct_change()
    print(dfi['Return'].isnull().sum())
    dfi.dropna(inplace=True)
    print(dfi['Return'].isnull().sum())
    dfi['PrevClose']=dfi.Close.shift(1)
    dfi['VarClose']=((dfi.Close - dfi.Close.shift(1))/dfi.Close.shift(1))*100
    #dfi['VarClose'] = dfi.Close.pct_change()
    dfi['Return21dd'] = (np.log(dfi.Close / dfi.Close.shift(per21dd)))*100
    dfi['Return50dd'] = (np.log(dfi.Close / dfi.Close.shift(per50dd)))*100
    dfi['Return200dd'] = (np.log(dfi.Close / dfi.Close.shift(per200dd)))*100
    dfi['VarAcum'] = ((dfi.Close/dfi.Close.iloc[0])-1)*100
    dfi['RetAcum'] = (np.log(dfi.Close / dfi.Close.iloc[0]))*100
    dfi["RetAcumColor"] = np.where(dfi.RetAcum < 0, 'red', 'green')

    ### mean reversion vs the 21-day simple moving average
    dfi['CSMA21dd']=dfi.Close.rolling(per21dd).mean()
    dfi['RSMA21dd']=((dfi.Close/dfi['CSMA21dd'])-1)*100
    dfi["RSMA21dd_Color"] = np.where(dfi.RSMA21dd < 0, 'red', 'green')

    ### mean reversion vs the 50-day simple moving average
    dfi['CSMA50dd']=dfi.Close.rolling(per50dd).mean()
    dfi['RSMA50dd']=((dfi.Close/dfi['CSMA50dd'])-1)*100
    dfi["RSMA50dd_Color"] = np.where(dfi.RSMA50dd < 0, 'red', 'green')

    ### mean reversion vs the 200-day exponential moving average
    dfi['CEMA200dd']=dfi.Close.ewm(span=per200dd, min_periods=per200dd, adjust=True).mean()
    dfi['REMA200dd']=((dfi.Close/dfi['CEMA200dd'])-1)*100
    dfi["REMA200dd_Color"] = np.where(dfi.REMA200dd < 0, 'red', 'green')

    #print(dfi['Return'].isnull().sum())
    #dfi.Return

    ### rolling correlation between asset and benchmark daily variations
    # NOTE(review): the two frames may have mismatched indices after their
    # independent dropna() calls — confirm alignment is acceptable here.
    df['RCorr21dd'] = df['VarClose'].rolling(per21dd).corr(dfi['VarClose'])
    df['RCorr50dd'] = df['VarClose'].rolling(per50dd).corr(dfi['VarClose'])
    df['RCorr200dd'] = df['VarClose'].rolling(per200dd).corr(dfi['VarClose'])

    ### compared returns
df['RetComp'] = df['RetAcum'] / dfi['RetAcum'] ### CALCULA ALPHA E BETA df['Alpha21dd'],df['Beta21dd'] = market_beta(df.Return, dfi.Return, 21) df['Alpha50dd'],df['Beta50dd'] = market_beta(df.Return, dfi.Return, 50) df['Alpha200dd'],df['Beta200dd'] = market_beta(df.Return, dfi.Return, 200) ####### CONSTROI GRÁFICOS # ### FIG 0 --------------------------------------------------------------------------- fig = go.Figure() fig.add_trace( go.Candlestick ( x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close, name=tkr) ) fig.add_trace( go.Scatter(x=df.index, y=df.CSMA21dd, mode='lines', name='MMA21', line_width=1,line_color='orange') ) fig.add_trace( go.Scatter(x=df.index, y=df.CSMA50dd, mode='lines', name='MMA50', line_width=1,line_color='navy') ) fig.add_trace( go.Scatter(x=df.index, y=df.CEMA200dd, mode='lines', name='EMA200', line_width=1,line_color='purple') ) ### FIG 1 --------------------------------------------------------------------------- fig1 = make_subplots( rows=1, cols=2, column_widths=[.85,.15], subplot_titles=("", "Histograma (Percent)") ) fig1.add_trace( go.Scatter(x=df.index, y=df.RSMA21dd, mode='lines', name='R_MMA21', line_width=1, line_color='orange'), col=1, row=1 ) fig1.add_trace( go.Scatter(x=df.index, y=df.RSMA50dd, mode='lines', name='R_MMA50', line_width=1,line_color='navy'), col=1, row=1 ) fig1.add_trace( go.Scatter(x=df.index, y=df.REMA200dd, mode='lines', name='R_EMA200', line_width=1,line_color='purple'), col=1, row=1 ) fig1.add_hline(y=0, line_color='black', line_dash='dot', line_width=1, annotation_text="Centro da Média", annotation_position="bottom left", col=1, row=1) fig1.add_trace( go.Histogram(x=df.RSMA21dd, name='R_MMA21', histnorm='percent', offsetgroup=0), col=2, row=1 ) fig1.add_trace( go.Histogram(x=df.RSMA50dd, name='R_MMA50', histnorm='percent', offsetgroup=0), col=2, row=1 ) fig1.add_trace( go.Histogram(x=df.REMA200dd, name='R_EMA200', histnorm='percent', offsetgroup=0), col=2, row=1 ) fig1.update_layout( 
xaxis=dict(showgrid=False), xaxis2=dict(showgrid=False) ) fig1.update_traces(bingroup='overlay', nbinsx=20, opacity=0.5, col=2, row=1, cumulative_enabled=False) ### FIG 2 --------------------------------------------------------------------------- fig2 = make_subplots( rows=3, cols=2, #subplot_titles=("Reversão à Média", "Indicador"), column_widths=[0.85,.15], row_heights=[.33, .33, .33], specs= [ [{'type' : 'xy'}, {'type' : 'indicator'}], [{'type' : 'xy'}, {'type' : 'indicator'}], [{'type' : 'xy'}, {'type' : 'indicator'}], ], #subplot_titles=('Mercedes', 'Ford', 'BMW') #specs=[ # [{}], # [{}], # [{}], # ] ) fig2.add_trace( go.Scatter(x=df.index, y=df.RSMA21dd, mode='lines', line_width=1, name='R_MMA21', line_color='orange') , row=1, col=1 ), fig2.add_trace( go.Indicator( mode = "gauge+number+delta", value = df.RSMA21dd[-1], #title = {'text': "Reversão MMA21"}, delta = {'reference': df.RSMA21dd.mean(), 'relative': True,'valueformat':'.2%'}, gauge={ 'axis':{ 'range':[math.floor(df.RSMA21dd.min()),math.ceil(df.RSMA21dd.max())], 'dtick': ( math.ceil(df.RSMA21dd.max()) - math.floor(df.RSMA21dd.min()) )/10, 'tickformat':'0.1f' }, 'steps' : [ {'range': [math.floor(df.RSMA21dd.min()), (math.floor(df.RSMA21dd.min())*0.5)], 'color': "rgba(50,50,200,0.55)"}, {'range': [(math.ceil(df.RSMA21dd.max())*0.5), math.ceil(df.RSMA21dd.max())], 'color': "rgba(200,50,50,0.55)"}], 'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 1, 'value': df.RSMA21dd.mean()}, 'bar': {'color': "black"} } ), row=1, col=2 ), fig2.add_trace( go.Scatter(x=df.index, y=df.RSMA50dd, mode='lines', line_width=1, name='R_MMA50', line_color='navy') , row=2, col=1 ) fig2.add_trace( go.Indicator( mode = "gauge+number+delta", value = df.RSMA50dd[-1], #title = {'text': "Reversão MMA50"}, delta = {'reference': df.RSMA50dd.mean(), 'relative': True, 'valueformat':'.2%'}, gauge={ 'axis':{ 'range':[math.floor(df.RSMA50dd.min()),math.ceil(df.RSMA50dd.max())], 'dtick': ( math.ceil(df.RSMA50dd.max()) - 
math.floor(df.RSMA50dd.min()) )/10, 'tickformat':'0.1f' }, 'steps' : [ {'range': [math.floor(df.RSMA50dd.min()), (math.floor(df.RSMA50dd.min())*0.5)], 'color': "rgba(50,50,200,0.55)"}, {'range': [(math.ceil(df.RSMA50dd.max())*0.5), math.ceil(df.RSMA50dd.max())], 'color': "rgba(200,50,50,0.55)"}], 'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 1, 'value': df.RSMA50dd.mean()}, 'bar': {'color': "black"} } ), row=2, col=2 ), fig2.add_trace( go.Scatter(x=df.index, y=df.REMA200dd, mode='lines', line_width=1, name='R_EMA200', line_color='purple') , row=3, col=1 ) fig2.add_trace( go.Indicator( mode = "gauge+number+delta", value = df.REMA200dd[-1], #title = {'text': "Reversão EMA200"}, delta = {'reference': df.REMA200dd.mean(), 'relative': True, 'valueformat':'.2%'}, gauge={ 'axis':{ 'range':[math.floor(df.REMA200dd.min()),math.ceil(df.REMA200dd.max())], 'dtick': ( math.ceil(df.REMA200dd.max()) - math.floor(df.REMA200dd.min()) )/10, 'tickformat':'0.1f' }, 'steps' : [ {'range': [math.floor(df.REMA200dd.min()), (math.floor(df.REMA200dd.min())*0.5)], 'color': "rgba(50,50,200,0.55)"}, {'range': [(math.ceil(df.REMA200dd.max())*0.5), math.ceil(df.REMA200dd.max())], 'color': "rgba(200,50,50,0.55)"}], 'threshold' : {'line': {'color': "red", 'width': 4}, 'thickness': 1, 'value': df.REMA200dd.mean()}, 'bar': {'color': "black"} } ), row=3, col=2 ), #fig2.add_hline(y=0, # line_color='black', line_dash='dot', line_width=1, # annotation_text="Centro da Média", # annotation_position="bottom left", # row=1, col=1,) ### FIG 3 --------------------------------------------------------------------------- fig3 = make_subplots( rows=1, cols=3, column_widths=[.33, .33, .33], subplot_titles=("MÉDIA vs RSMA21dd", "MÉDIA vs RSMA50dd", "MÉDIA vs REMA200dd"), ) fig3.add_trace( go.Scatter(name='', x=df.RSMA21dd, y=df.CSMA21dd, text=df.index.strftime("%d/%m/%Y"), mode='markers', marker=dict( size=7, color=df.RSMA21dd, #set color equal to a variable colorscale='Bluered', # one of plotly 
colorscales opacity=0.5, showscale=False), hovertemplate = "%{text} <br> RSMA21dd : %{x:.2f} </br> MÉDIA PREÇO : %{y:,.2f}" ), row=1, col=1 ) fig3.add_trace( go.Scatter(name='', x=df.RSMA50dd, y=df.CSMA50dd, text=df.index.strftime("%d/%m/%Y"), mode='markers', marker=dict( size=7, color=df.RSMA50dd, #set color equal to a variable colorscale='Bluered', # one of plotly colorscales opacity=0.5, showscale=False), hovertemplate = "%{text} <br> RSMA50dd : %{x:.2f} </br> MÉDIA PREÇO : %{y:,.2f}" ), row=1, col=2 ) fig3.add_trace( go.Scatter(name='', x=df.REMA200dd, y=df.CEMA200dd, text=df.index.strftime("%d/%m/%Y"), mode='markers', marker=dict( size=7, color=df.REMA200dd, #set color equal
#!/usr/bin/env python

"""
A variable-like object extending over multiple tiles and time slices

<NAME> and <NAME>, Tech-X Corp. (2011)
This code is provided with the hope that it will be useful.
No guarantee is provided whatsoever. Use at your own risk.
"""

import cdms2
from cdms2.MV2 import concatenate as MV2concatenate
from cdms2.error import CDMSError
from cdms2.hgrid import FileCurveGrid
from cdms2.Cdunif import CdunifFile
from cdms2.coord import FileAxis2D
from cdms2.fvariable import FileVariable
from cdms2.axis import FileAxis, TransientAxis
from cdms2.axis import concatenate as axisConcatenate


class TimeAggregatedFileVariable:
    """
    Aggregate a time-dependent variable across files for a single grid.

    Presents a list of cdms2 FileVariables (one per time-slice file) as a
    single variable indexable/callable along a global time axis.
    """

    def __init__(self, gridIndex, listOfFVs, hostObj):
        """
        Parameters
        ----------
        gridIndex
            Index of the requested grid.
        listOfFVs
            List of cdms2.FileVariable, one per time-slice file.
        hostObj
            Host object, used for file/grid counts.
        """
        self.fvs = listOfFVs
        self.gridIndex = gridIndex
        self.hostObj = hostObj
        # Total number of (slice file, data file, grid) combinations.
        self.nTimeStepFiles = hostObj.nTimeSliceFiles * \
            hostObj.nTimeDataFiles * hostObj.nGrids
        it = self.getTimeAxisIndex(self.fvs[0].getAxisList())
        # Number of time steps stored in each individual file.
        self.nTimeStepsPerFile = (self.fvs[0].shape)[it]
        # Number of time steps in the whole aggregated variable.
        self.nTimeStepsPerVariable = hostObj.nTimeSliceFiles * self.nTimeStepsPerFile

    def __call__(self, *args, **kwargs):
        """
        Subset every underlying file variable and aggregate the results.

        Parameters
        ----------
        *args
            cdms2 selector arguments.
        **kwargs
            cdms2 selector keywords.

        Returns
        -------
        Aggregated (time-concatenated) transient variable.
        """
        subsetList = []
        for iFile in range(self.hostObj.nTimeSliceFiles):
            # Deliberate best-effort: files whose domain does not intersect
            # the requested selector raise and are simply skipped.
            try:
                var = self.fvs[iFile](*args, **kwargs)
                subsetList.append(var)
            except Exception:
                pass
        newvar = self.createTransientVariableFromList(subsetList)
        return newvar

    def __getitem__(self, slc):
        """
        Index/slice the aggregated variable.

        Parameters
        ----------
        slc
            Integer, slice, or tuple of slices. In a tuple, the time axis
            position is located dynamically (usually index 0).

        Returns
        -------
        A FileVariable (integer index) or a sliced transient variable.

        Raises
        ------
        CDMSError
            If a tuple selector is given but no time axis exists.
        """
        if isinstance(slc, int):
            # Return the underlying FileVariable for that file.
            return self.fvs[slc]
        elif isinstance(slc, tuple):
            # Create a TransientVariable; may need to aggregate in time.
            nTSF = self.nTimeStepsPerFile
            axes = self.fvs[0].getAxisList()
            timeAxisIndex = self.getTimeAxisIndex(axes)
            if timeAxisIndex is None:
                # Fix: the original statement built the message but never
                # raised (Python 2 "raise Class, msg" leftover).
                raise CDMSError("No time axis in :\n" + str(axes))
            if isinstance(slc[timeAxisIndex], slice):
                (fileInds, timeStepInds) = self.getTimeFileIndex(
                    slc[timeAxisIndex])
                tv = self.createTransientVariableFromIndices(
                    fileInds, timeStepInds)
                newslc = self.buildSlice(slc, tv.getAxisList())
                return tv[newslc]
            elif isinstance(slc[timeAxisIndex], int):
                # Fix: integer (floor) division — "/" yields a float under
                # Python 3 and breaks list indexing.
                fileIndex = slc[timeAxisIndex] // nTSF
                timeIndex = slc[timeAxisIndex] % nTSF
                # Get just the file needed for the index slice requested.
                tv = self.createTransientVariableFromIndices(
                    fileIndex, timeIndex)
                newslc = self.buildSlice(slc, axes)
                return tv[newslc]
        elif isinstance(slc, slice):
            (fileInds, timeStepInds) = self.getTimeFileIndex(slc)
            tv = self.createTransientVariableFromIndices(
                fileInds, timeStepInds)
            return tv

    def __len__(self):
        return len(self.fvs)

    def getTimeFileIndex(self, timeslc):
        """
        Map a global time slice to per-file index lists.

        Parameters
        ----------
        timeslc
            The requested global time slice (start/stop/step may be None).

        Returns
        -------
        (fileIndices, timeIndices) — two parallel lists of lists: for each
        file touched by the slice, the file index repeated per step and the
        corresponding in-file time-step indices.
        """
        nTSF = self.nTimeStepsPerFile
        nTSV = self.nTimeStepsPerVariable
        timI1 = []
        filI1 = []
        timI2 = []
        filI2 = []
        # Fix: slice components may be None (e.g. var[:]); the original
        # passed None straight into range() and comparisons.
        step = 1 if timeslc.step is None else timeslc.step
        start = 0 if timeslc.start is None else timeslc.start
        stop = timeslc.stop
        if stop is None or stop >= nTSV:
            stop = nTSV
        # Fix: floor division — file indices must be ints under Python 3.
        ii = [i // nTSF for i in range(start, stop, step)]
        tt = [i % nTSF for i in range(start, stop, step)]
        # Group consecutive steps by file: start a new sublist every time
        # the file index changes.
        indx = 0
        for i in ii:
            if indx == 0:
                timI1.append(tt[indx])
                filI1.append(ii[indx])
            else:
                if ii[indx] == ii[indx - 1]:
                    timI1.append(tt[indx])
                    filI1.append(ii[indx])
                else:
                    timI2.append(timI1)
                    filI2.append(filI1)
                    timI1 = []
                    filI1 = []
                    timI1.append(tt[indx])
                    filI1.append(ii[indx])
            indx += 1
        filI2.append(filI1)
        timI2.append(timI1)
        return filI2, timI2

    def getTimeAxisIndex(self, inAxes):
        """
        Get the position of the time axis.

        Parameters
        ----------
        inAxes
            The axes list to search.

        Returns
        -------
        The index of the first time axis, or None if no time axis is found.
        """
        for indx, axis in enumerate(inAxes):
            if axis.isTime():
                return indx
        return None

    def buildSlice(self, inslc, inAxes):
        """
        Build a slice where the global time is either removed (single time
        step requested) OR all requested times are returned, based on the
        new variable's time shape.

        Parameters
        ----------
        inslc
            The original slice tuple.
        inAxes
            The axes to scan for the time axis.

        Returns
        -------
        New slice tuple with no time dimension, or with the time position
        set to slice(None, None, None).
        """
        newslc = []
        for cslc, axis in zip(inslc, inAxes):
            if axis.isTime():
                if isinstance(cslc, int):
                    # Omit slice - the new variable has only the shape of
                    # the grid.
                    continue
                else:
                    newslc.append(slice(None, None, None))
            else:
                newslc.append(cslc)
        return tuple(newslc)

    def buildAxes(self, timeAxis, inAxes):
        """
        Replace the time axis in an axes list with the given time axis.

        Parameters
        ----------
        timeAxis
            The new time axis.
        inAxes
            The current axes.

        Returns
        -------
        New axes list with the time axis substituted.
        """
        axes = []
        for axis in inAxes:
            if axis.isTime():
                axes.append(timeAxis)
            else:
                axes.append(axis)
        return axes

    def createTransientVariableFromList(self, tvList):
        """
        Aggregate a list of sliced/subset transient variables.

        Parameters
        ----------
        tvList
            List of subset/sliced transient variables.

        Returns
        -------
        Aggregated (concatenated) transient variable.
        """
        outvar = tvList[0]
        if len(tvList) > 1:
            for varIndex in range(1, len(tvList)):
                new = MV2concatenate((outvar, tvList[varIndex]))
                outvar = new
        return outvar

    def createTransientVariableFromIndices(self, fileIndices, timeIndices):
        """
        Aggregate a time file variable. Start and end indices use slice
        notation.

        Parameters
        ----------
        fileIndices
            The file indices to aggregate across (int or list of lists).
        timeIndices
            The time steps within each file (int or list of lists).

        Returns
        -------
        Aggregated time-dependent variable. Has the shape of the full
        grid; subset the grid after exiting.
        """
        from numpy import reshape
        firsttime = True
        nTSF = self.nTimeStepsPerFile
        if not isinstance(fileIndices, int):
            for files, times in zip(fileIndices, timeIndices):
                # Renamed from "file" to avoid shadowing the builtin.
                for indx, fileIdx in enumerate(files):
                    # Should make these slices.
                    cvar = self.fvs[fileIdx][times[indx]]
                    grid = self.fvs[fileIdx].getGrid()
                    atts = cvar.attributes
                    # Insert the new time axis.
                    axisTime = self.fvs[fileIdx].getTime()
                    timeAxis = TransientAxis([fileIdx * nTSF + times[indx]],
                                             attributes=axisTime.attributes,
                                             id=axisTime.id)
                    axes = self.buildAxes(
                        timeAxis, self.fvs[fileIdx].getAxisList())
                    # shape --> tm1.shape = (1, :, :)
                    tm1 = reshape(cvar, tuple([1] + list(cvar.shape)))
                    # Attach needed items and create the transient variable.
                    var = cdms2.createVariable(tm1,
                                               axes=axes,
                                               grid=grid,
                                               attributes=atts,
                                               id=cvar.standard_name)
                    if firsttime:
                        new = var
                        firsttime = False
                    else:
                        # Insert the new time axis.
                        taA = new.getTime()
                        newTime = axisConcatenate((taA, timeAxis),
                                                  attributes=axisTime.attributes,
                                                  id=axisTime.id)
                        axes = self.buildAxes(
                            newTime, self.fvs[fileIdx].getAxisList())
                        tmp = MV2concatenate((new, var))
                        new = cdms2.createVariable(tmp,
                                                   axes=axes,
                                                   grid=grid,
                                                   attributes=atts,
                                                   id=cvar.standard_name)
        else:
            new = self.fvs[fileIndices][timeIndices]
        return new


class TimeFileVariable:
    """
    Construct an aggregated time-dependent variable, one
    TimeAggregatedFileVariable per grid.
    """

    def __init__(self, hostObj, varName):
        """
        Create a list of file variables with grids attached.

        Parameters
        ----------
        hostObj
            The host object opened by gsHost.
        varName
            The variable name to be aggregated.
        """
        self.id = varName
        self.vars = []
        mode = hostObj.mode
        for gridIndex in range(hostObj.nGrids):
            # Get the filenames.
            aa = list(hostObj.gridVars.keys())
            gn = hostObj.gridVars[aa[0]][gridIndex]
            g = CdunifFile(gn, mode)
            # Renamed from "vars" to avoid shadowing the builtin.
            fileVars = []
            for timeFileIndex in range(hostObj.nTimeDataFiles):
                # Open the files.
                fn = hostObj.timeVars[varName][gridIndex][timeFileIndex]
                # Need f and u because they serve slightly different
                # purposes: f.axes exists while axes is not a part of u.
                f = cdms2.open(fn, mode)
                u = CdunifFile(fn, mode)
                # u.variables[varName].gridIndex = gridIndex
                # Turn the coordinates into a list.
                # Fix: initialize so a variable lacking a "coordinates"
                # attribute does not raise NameError below.
                coords = []
                if hasattr(u.variables[varName], "coordinates"):
                    coords = u.variables[varName].coordinates.split()
                # coords1d = f._convention_.getAxisIds(u.variables)
                # coordsaux = f._convention_.getAxisAuxIds(u.variables, coords1d)
                # Convert the variable into a FileVariable.
                f.variables[varName] = FileVariable(
                    f, varName, u.variables[varName])
                # Add the coordinates to the file.
                for coord in coords:
                    f.variables[coord] = g.variables[coord]
                    f.variables[coord] = FileAxis2D(
                        f, coord, g.variables[coord])
                # Build the axes.
                for key in list(f.axes.keys()):
                    f.axes[key] = FileAxis(f, key, None)
                # Set the boundaries.
                for coord in coords:
                    bounds = f._convention_.getVariableBounds(
                        f, f.variables[coord])
                    f.variables[coord].setBounds(bounds)
                # Initialize the domain.
                for var in list(f.variables.values()):
                    var.initDomain(f.axes)
                # Add the grid.
                gridkey, lat, lon = f.variables[varName].generateGridkey(
                    f._convention_, f.variables)
                gridname = ("grid%d_" % gridIndex) + "%dx%d" % lat.shape
                # grid = FileGenericGrid(lat, lon, gridname, parent = f, maskvar = None)
                grid = FileCurveGrid(
                    lat, lon, gridname, parent=f, maskvar=None)
                f.variables[varName]._grid_ = grid
                fileVars.append(f.variables[varName])
            tafv = TimeAggregatedFileVariable(gridIndex, fileVars, hostObj)
            self.vars.append(tafv)
        self._repr_string = "TimeFileVariable"

    def listall(self, all=None):
        """
        Gain access to the cdms2 listall method.

        Parameters
        ----------
        all
            Passed through to cdms2 listall.

        Returns
        -------
        list
        """
        return self[0][0].listall(all=all)

    def showall(self, all=None, device=None):
        """
        Gain access to the cdms2 showall method.

        Parameters
        ----------
        all
            Passed through to cdms2 showall.
        device
            Passed through to cdms2 showall.

        Returns
        -------
        list
        """
        return self[0][0][:].showall(all=all, device=device)

    def __getitem__(self, gridIndex):
        """
        Parameters
        ----------
        gridIndex
            Index of the requested grid.

        Returns
        -------
        The TimeAggregatedFileVariable for that grid.
        """
        return self.vars[gridIndex]


# ##############################################################################
# ############# DEPRECIATED - Testing required to fully remove       ###########
# ##############################################################################

class TimeTransientVariable:
    def __init__(self, hostObj, varName, **slicekwargs):
        """
        Constructor

        Parameters
        ----------
        hostObj
            Host object.
        varName
            Variable name.
        slicekwargs
            e.g. lon=(-180,180), lat=(-90,90), time=5
            cf Packages/cdms2/Lib/cudsinterface.py for a list of keywords.
        """
        # TimeVariable(self, hostObj, varName)
        self.id = varName
        self.vars = []
        gridFilenames = hostObj.getGridFilenames()
        # Normalize slice keywords to lower case.
        kwargs = {}
        for k in list(slicekwargs.keys()):
            kwargs[k.lower()] = slicekwargs[k]
        # time
score for target prioritisation of genes with associations with your disease of interest. Features that contribute to the overall score include detection of association and association odds-ratio from a variety of studies (**OpenTargets** and the **PheWAS catalog**), network degree in protein-protein interaction networks (**STRING**), and level of druggability (**Pharos**). ") try: # extract diseases disease = ["--"] for i in list(set(df_selected["gwas-associations"].tolist())): disease.extend(i.split(", ")) disease.extend(df_selected["phewas phenotype"].tolist()) disease = sorted(list(set(disease))) # sidebar -- disease select box select_disease = st.sidebar.selectbox('Disease', disease, key='11') select_disease = select_disease.split(" (")[0] # sidebar -- p-value slider select_p = st.sidebar.text_input(label = "P-value", help = "Defaults to p = 0.05. Accepts scientific notation, e.g., 5E-4, 3e-9", value = "0.05") try: if (float(select_p) <= 1) & (float(select_p) > 0): select_p = float(select_p) else: select_p = 0.05 except: select_p = 0.05 df_selected = df_selected[df_selected["p-value"] <= select_p] # opentargets gene - disease association score try: df_gene_score = opentargets_gene_score(disease_name= select_disease) #st.write(df_gene_score) # add the association score to phewas dataset df_selected = pd.merge(df_selected, df_gene_score, left_on='gene_name', right_on = "gene_symbol", how='left') df_selected['opentargets_associations'] = df_selected['opentargets_associations'].fillna(0) except Exception: df_selected['opentargets_associations'] = 0 #st.write(df_selected['opentargets_associations']) # subset the data frame for the disease appearing in either PheWAS and GWAS df_disease_phewas_or_gwas = df_selected[df_selected["phewas phenotype"].str.contains(select_disease) | df_selected["gwas-associations"].str.contains(select_disease)] df_disease_phewas_or_gwas.sort_values(by=['odds-ratio'], inplace=True, ascending=False) df_disease_phewas_or_gwas = 
df_disease_phewas_or_gwas[["gene_name", "snp", "phewas phenotype", "odds-ratio", "p-value", "gwas-associations", "opentargets_associations"]].reset_index().drop("index", axis= 1) # add phewas and gwas interaction indicator score look for the genes in the intersection of phewas and gwas association df_disease_phewas_or_gwas = df_disease_phewas_or_gwas.assign(indicator_Phe_GWAS = np.where(df_disease_phewas_or_gwas["phewas phenotype"].str.contains(select_disease) & df_disease_phewas_or_gwas["gwas-associations"].str.contains(select_disease), 1, 0)) # look for evidence of druggability # druggable evidence df_disease_phewas_or_gwas = pd.merge(df_disease_phewas_or_gwas, df_druggable, left_on='gene_name', right_on = "sym", how='left') # subset the data by odds ratio df_disease_phewas_or_gwas = df_disease_phewas_or_gwas.reset_index().drop("index", axis= 1) df_disease_phewas_or_gwas_des = df_disease_phewas_or_gwas[df_disease_phewas_or_gwas["odds-ratio"] >= 1] df_disease_phewas_or_gwas_pro = df_disease_phewas_or_gwas[df_disease_phewas_or_gwas["odds-ratio"] < 1] # add druggable score # risk gene df_tdl_des = df_disease_phewas_or_gwas_des[["gene_name", "tdl"]] df_tdl_des =df_tdl_des.dropna() df_tdl_des = df_tdl_des.drop_duplicates(keep = 'first').reset_index().drop('index', axis=1) df_tdl_des = df_tdl_des[df_tdl_des["tdl"] != 'Tdark'] df_tdl_des_degree = df_tdl_des.groupby(["gene_name"]).size().reset_index(name='druggability_score') df_disease_phewas_or_gwas_des = pd.merge(df_disease_phewas_or_gwas_des, df_tdl_des_degree, left_on='gene_name', right_on = "gene_name", how='left') df_disease_phewas_or_gwas_des["druggability_score"] = df_disease_phewas_or_gwas_des["druggability_score"].fillna(0) # protective gene df_tdl_pro = df_disease_phewas_or_gwas_pro[["gene_name", "tdl"]] df_tdl_pro =df_tdl_pro.dropna() df_tdl_pro = df_tdl_pro.drop_duplicates(keep = 'first').reset_index().drop('index', axis=1) df_tdl_pro = df_tdl_pro[df_tdl_pro["tdl"] != 'Tdark'] df_tdl_pro_degree = 
df_tdl_pro.groupby(["gene_name"]).size().reset_index(name='druggability_score') df_disease_phewas_or_gwas_pro = pd.merge(df_disease_phewas_or_gwas_pro, df_tdl_pro_degree, left_on='gene_name', right_on = "gene_name", how='left') df_disease_phewas_or_gwas_pro["druggability_score"] = df_disease_phewas_or_gwas_pro["druggability_score"].fillna(0) # overall gene df_tdl_all = df_disease_phewas_or_gwas[["gene_name", "tdl"]] df_tdl_all =df_tdl_all.dropna() df_tdl_all = df_tdl_all.drop_duplicates(keep = 'first').reset_index().drop('index', axis=1) df_tdl_all = df_tdl_all[df_tdl_all["tdl"] != 'Tdark'] df_tdl_all_degree = df_tdl_all.groupby(["gene_name"]).size().reset_index(name='druggability_score') df_disease_phewas_or_gwas = pd.merge(df_disease_phewas_or_gwas, df_tdl_all_degree, left_on='gene_name', right_on = "gene_name", how='left') df_disease_phewas_or_gwas["druggability_score"] = df_disease_phewas_or_gwas["druggability_score"].fillna(0) # protein-protein network (des genes) gene_set = df_disease_phewas_or_gwas_des["gene_name"].unique().tolist() g_des=net.Network() try: df_protein_interaction = proteins_interaction(input_protein= gene_set) # compute the degree of nodes frequency_a = collections.Counter(df_protein_interaction["preferredName_A"].tolist()) dict_frequency_a = dict(frequency_a) frequency_b = collections.Counter(df_protein_interaction["preferredName_B"].tolist()) dict_frequency_b = dict(frequency_b) gene_degree = {} for i in gene_set: degree_a = 0 if dict_frequency_a.get(i) is None else dict_frequency_a.get(i) degree_b = 0 if dict_frequency_b.get(i) is None else dict_frequency_b.get(i) gene_degree[i] = degree_a + degree_b df_gene_degree = pd.DataFrame(gene_degree.items(), columns=['gene', 'networkDegree_score']) # add degree score df_disease_phewas_or_gwas_des = pd.merge(df_disease_phewas_or_gwas_des, df_gene_degree, left_on='gene_name', right_on = "gene", how='left') # generate PPI network for i in df_protein_interaction.index: 
g_des.add_node(df_protein_interaction["preferredName_A"][i], color = "#FF7F7F") g_des.add_node(df_protein_interaction["preferredName_B"][i], color = "#FF7F7F") g_des.add_edge(df_protein_interaction["preferredName_A"][i],df_protein_interaction["preferredName_B"][i], weight= df_protein_interaction["score"][i]) except Exception: df_disease_phewas_or_gwas_des['networkDegree_score'] = 0 pass # protein-protein network (pro genes) gene_set = df_disease_phewas_or_gwas_pro["gene_name"].unique().tolist() g_pro=net.Network() try: df_protein_interaction = proteins_interaction(input_protein= gene_set) # compute the degree of nodes frequency_a = collections.Counter(df_protein_interaction["preferredName_A"].tolist()) dict_frequency_a = dict(frequency_a) frequency_b = collections.Counter(df_protein_interaction["preferredName_B"].tolist()) dict_frequency_b = dict(frequency_b) gene_degree = {} for i in gene_set: degree_a = 0 if dict_frequency_a.get(i) is None else dict_frequency_a.get(i) degree_b = 0 if dict_frequency_b.get(i) is None else dict_frequency_b.get(i) gene_degree[i] = degree_a + degree_b df_gene_degree = pd.DataFrame(gene_degree.items(), columns=['gene', 'networkDegree_score']) # add degree score df_disease_phewas_or_gwas_pro = pd.merge(df_disease_phewas_or_gwas_pro, df_gene_degree, left_on='gene_name', right_on = "gene", how='left') # generate PPI network for i in df_protein_interaction.index: g_pro.add_node(df_protein_interaction["preferredName_A"][i], color = "#45b6fe") g_pro.add_node(df_protein_interaction["preferredName_B"][i], color = "#45b6fe") g_pro.add_edge(df_protein_interaction["preferredName_A"][i],df_protein_interaction["preferredName_B"][i], weight= df_protein_interaction["score"][i]) except Exception: df_disease_phewas_or_gwas_pro['networkDegree_score'] = 0 pass # protein-protein network (overall genes) gene_set = df_disease_phewas_or_gwas["gene_name"].unique().tolist() g_all=net.Network() try: df_protein_interaction = proteins_interaction(input_protein= 
gene_set) # compute the degree of nodes frequency_a = collections.Counter(df_protein_interaction["preferredName_A"].tolist()) dict_frequency_a = dict(frequency_a) frequency_b = collections.Counter(df_protein_interaction["preferredName_B"].tolist()) dict_frequency_b = dict(frequency_b) gene_degree = {} for i in gene_set: degree_a = 0 if dict_frequency_a.get(i) is None else dict_frequency_a.get(i) degree_b = 0 if dict_frequency_b.get(i) is None else dict_frequency_b.get(i) gene_degree[i] = degree_a + degree_b df_gene_degree = pd.DataFrame(gene_degree.items(), columns=['gene', 'networkDegree_score']) # add degree score df_disease_phewas_or_gwas = pd.merge(df_disease_phewas_or_gwas, df_gene_degree, left_on='gene_name', right_on = "gene", how='left') # generate PPI network for i in df_protein_interaction.index: g_all.add_node(df_protein_interaction["preferredName_A"][i], color = "#45b6fe") g_all.add_node(df_protein_interaction["preferredName_B"][i], color = "#45b6fe") g_all.add_edge(df_protein_interaction["preferredName_A"][i],df_protein_interaction["preferredName_B"][i], weight= df_protein_interaction["score"][i]) except Exception: df_disease_phewas_or_gwas['networkDegree_score'] = 0 pass # option to remove features from framework featuresList = ["odds-ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"] feature_remove = st.sidebar.multiselect("Remove: ", featuresList, key='2') if len(feature_remove) > 0: featuresList = [ele for ele in featuresList if ele not in feature_remove] weight = 1/len(featuresList) # group the data by gene and normalize features (risk) df_disease_phewas_or_gwas_des = df_disease_phewas_or_gwas_des[["gene_name", "odds-ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"]] df_disease_phewas_or_gwas_des = df_disease_phewas_or_gwas_des.groupby(["gene_name"]).mean() # normalize by features scaler = preprocessing.MinMaxScaler() names = 
df_disease_phewas_or_gwas_des.columns ind = df_disease_phewas_or_gwas_des.index d_des = scaler.fit_transform(df_disease_phewas_or_gwas_des) df_disease_phewas_or_gwas_des_norm = pd.DataFrame(d_des, columns = names, index= ind) #df_disease_phewas_or_gwas_des_norm["overall score"] = weight * (df_disease_phewas_or_gwas_des_norm["odds-ratio"] + df_disease_phewas_or_gwas_des_norm["opentargets_associations"] + df_disease_phewas_or_gwas_des_norm["indicator_Phe_GWAS"] + df_disease_phewas_or_gwas_des_norm["druggability_score"] + df_disease_phewas_or_gwas_des_norm["networkDegree_score"]) featuresList_des = list(featuresList) df_disease_phewas_or_gwas_des_norm = df_disease_phewas_or_gwas_des_norm[featuresList_des] df_disease_phewas_or_gwas_des_norm["overall score"] = weight * df_disease_phewas_or_gwas_des_norm.sum(axis = 1, skipna = True) #df_disease_phewas_or_gwas_des_norm = df_disease_phewas_or_gwas_des_norm[["overall score", "odds-ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"]] featuresList_des.insert(0,"overall score") df_disease_phewas_or_gwas_des_norm = df_disease_phewas_or_gwas_des_norm[featuresList_des] df_disease_phewas_or_gwas_des_norm.sort_values(by=['overall score'], inplace=True, ascending=False) # group the data by gene and normalize features (protective) df_disease_phewas_or_gwas_pro["1-odds_ratio"] = 1- df_disease_phewas_or_gwas_pro["odds-ratio"] featuresList_pro =['1-odds_ratio' if i =='odds-ratio' else i for i in featuresList] df_disease_phewas_or_gwas_pro = df_disease_phewas_or_gwas_pro[["gene_name", "1-odds_ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"]] df_disease_phewas_or_gwas_pro = df_disease_phewas_or_gwas_pro.groupby(["gene_name"]).mean() # normalize by features scaler = preprocessing.MinMaxScaler() names = df_disease_phewas_or_gwas_pro.columns ind = df_disease_phewas_or_gwas_pro.index d_pro = 
scaler.fit_transform(df_disease_phewas_or_gwas_pro) df_disease_phewas_or_gwas_pro_norm = pd.DataFrame(d_pro, columns = names, index= ind) #df_disease_phewas_or_gwas_pro_norm["overall score"] = weight * (df_disease_phewas_or_gwas_pro_norm["1-odds_ratio"] + df_disease_phewas_or_gwas_pro_norm["opentargets_associations"] + df_disease_phewas_or_gwas_pro_norm["indicator_Phe_GWAS"] + df_disease_phewas_or_gwas_pro_norm["druggability_score"] + df_disease_phewas_or_gwas_pro_norm["networkDegree_score"]) df_disease_phewas_or_gwas_pro_norm = df_disease_phewas_or_gwas_pro_norm[featuresList_pro] df_disease_phewas_or_gwas_pro_norm["overall score"] = weight * df_disease_phewas_or_gwas_pro_norm.sum(axis = 1, skipna = True) #df_disease_phewas_or_gwas_pro_norm = df_disease_phewas_or_gwas_pro_norm[["overall score", "1-odds_ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"]] featuresList_pro.insert(0,"overall score") df_disease_phewas_or_gwas_pro_norm = df_disease_phewas_or_gwas_pro_norm[featuresList_pro] df_disease_phewas_or_gwas_pro_norm.sort_values(by=['overall score'], inplace=True, ascending=False) df_disease_phewas_or_gwas_pro_norm = df_disease_phewas_or_gwas_pro_norm.rename(columns = {"overall score": "StarGazer score"}) # group the data by gene and normalize features (overall) df_disease_phewas_or_gwas = df_disease_phewas_or_gwas[["gene_name", "odds-ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"]] df_disease_phewas_or_gwas = df_disease_phewas_or_gwas.groupby(["gene_name"]).mean() # normalize by features scaler = preprocessing.MinMaxScaler() names = df_disease_phewas_or_gwas.columns ind = df_disease_phewas_or_gwas.index d_des = scaler.fit_transform(df_disease_phewas_or_gwas) df_disease_phewas_or_gwas_norm = pd.DataFrame(d_des, columns = names, index= ind) #df_disease_phewas_or_gwas_norm["overall score"] = weight * (df_disease_phewas_or_gwas_norm["odds-ratio"] + 
df_disease_phewas_or_gwas_norm["opentargets_associations"] + df_disease_phewas_or_gwas_norm["indicator_Phe_GWAS"] + df_disease_phewas_or_gwas_norm["druggability_score"] + df_disease_phewas_or_gwas_norm["networkDegree_score"]) featuresList_all = list(featuresList) df_disease_phewas_or_gwas_norm = df_disease_phewas_or_gwas_norm[featuresList_all] df_disease_phewas_or_gwas_norm["overall score"] = weight * df_disease_phewas_or_gwas_norm.sum(axis = 1, skipna = True) #df_disease_phewas_or_gwas_norm = df_disease_phewas_or_gwas_norm[["overall score", "odds-ratio", "opentargets_associations", "indicator_Phe_GWAS", "druggability_score", "networkDegree_score"]] featuresList_all.insert(0,"overall score") df_disease_phewas_or_gwas_norm = df_disease_phewas_or_gwas_norm[featuresList_all] df_disease_phewas_or_gwas_norm.sort_values(by=['overall score'], inplace=True, ascending=False) df_disease_phewas_or_gwas_norm = df_disease_phewas_or_gwas_norm.rename(columns = {"overall score": "StarGazer score"}) st.header("Disease: " + "*" + select_disease + "*" + ", P-value <= " + "*" + str(round(select_p, 4)) + "*") # target prioritization data frame st.subheader("Overall target prioritization: " + "*" + str(len(df_disease_phewas_or_gwas_norm)) + "* genes") with st.container(): st.markdown(get_table_download_link(df_disease_phewas_or_gwas_norm.reset_index()), unsafe_allow_html=True) st.dataframe(df_disease_phewas_or_gwas_norm, width = 1100) st.subheader("Risk allele target prioritization: " + "*" + str(len(df_disease_phewas_or_gwas_des_norm)) + "* genes") with st.container(): st.markdown(get_table_download_link(df_disease_phewas_or_gwas_des_norm.reset_index()), unsafe_allow_html=True) st.dataframe(df_disease_phewas_or_gwas_des_norm, width = 1100) st.subheader("Protective allele target prioritization: " + "*" + str(len(df_disease_phewas_or_gwas_pro_norm)) + "* genes") with st.container(): st.markdown(get_table_download_link(df_disease_phewas_or_gwas_pro_norm.reset_index()), 
unsafe_allow_html=True) st.dataframe(df_disease_phewas_or_gwas_pro_norm, width = 1100) except: st.subheader("No data found. Please try selecting another disease!") cache = "" placeholder = st.sidebar.empty() input = placeholder.text_input("Search NCBI for your gene of interest") if input != cache: gene_of_interest_URL = "https://www.ncbi.nlm.nih.gov/gene/?term=" + input + "+AND+human[orgn]" webbrowser.open(gene_of_interest_URL) cache = input else: st.markdown("StarGazer is a multi-omics pipeline which integrates several datasets to provide insights into therapeutic target prioritisation. We have integrated data from [OpenTargets](https://www.opentargets.org/) and the [PheWAS catalog](https://phewascatalog.org/phewas) (gene variant risk associations with phenotypic variants), [Pharos](https://pharos.nih.gov/) (druggability of gene target), and [STRING](https://string-db.org/) (protein-protein interaction).") # extract GWAS diseases disease = [] for i in list(set(df_selected["gwas-associations"].tolist())): disease.extend(i.split(", ")) disease = sorted(list(set(disease))) num_gwas = len(disease) # extract phewas diseases disease = [] disease.extend(df_selected["phewas phenotype"].tolist()) disease = sorted(list(set(disease))) num_phewas = len(disease) disease = [] for i in list(set(df_selected["gwas-associations"].tolist())): disease.extend(i.split(", ")) disease.extend(df_selected["phewas phenotype"].tolist()) disease = sorted(list(set(disease))) num_phewas_gwas = len(disease)
I', 'type': 'line', 'wavelengths': [[0.7699,0.7665],[1.169,1.177],[1.244,1.252]]}, \ 'ki': {'label': r'K I', 'type': 'line', 'wavelengths': [[0.7699,0.7665],[1.169,1.177],[1.244,1.252]]}, \ 'k1': {'label': r'K I', 'type': 'line', 'wavelengths': [[0.7699,0.7665],[1.169,1.177],[1.244,1.252]]}} features = kwargs.get('features',[]) if not isinstance(features,list): features = [features] if (kwargs.get('ldwarf',False) or kwargs.get('mdwarf',False)): features.extend(['k','na','feh','tio','co','h2o','h2']) if (kwargs.get('tdwarf',False)): features.extend(['k','ch4','h2o','h2']) if (kwargs.get('young',False)): features.extend(['vo']) if (kwargs.get('binary',False)): features.extend(['sb']) # clean repeats while maintaining order - set does not do this fea = [] for i in features: if i not in fea: fea.append(i) features = fea # error check - make sure you're plotting something if (len(args) < 1): print('plotSpectrum needs at least one Spectrum object to plot') return # if a list is passed, use this list elif (len(args) == 1 and isinstance(args[0],list)): splist = args[0] # if a set of objects is passed, turn into a list else: splist = [] for a in args: if isinstance(a,Spectrum): # a spectrum object splist.append(a) elif isinstance(a,list): splist.append(a) else: print('\nplotSpectrum: Ignoring input object {} as it is neither a Spectrum object nor a list\n\n'.format(a)) # set up for multiplot if (len(splist) == 1): multiplot = False # array of lists => force multiplot elif (len(splist) > 1 and isinstance(splist[0],list)): multiplot = True # reformat array of spectra of multiplot is used (i.e., user forgot to set) if multiplot == True and isinstance(splist[0],Spectrum): splist = [[s] for s in splist] elif multiplot == False and isinstance(splist[0],Spectrum): splist = [splist] # flatten array if multiplot is not set elif multiplot == False and isinstance(splist[0],list) and len(splist) > 1: splist = [[item for sublist in splist for item in sublist]] # flatten tot_sp = 
len([item for sublist in splist for item in sublist]) # Total number of spectra # prep legend legend = kwargs.get('legend',[str() for x in numpy.arange(tot_sp)]) legend = kwargs.get('legends',legend) legend = kwargs.get('label',legend) legend = kwargs.get('labels',legend) if not isinstance(legend,list): legend = [legend] if(len(legend) < tot_sp): legend.extend([str() for x in numpy.arange(tot_sp-len(legend))]) legendLocation = kwargs.get('legendLocation','upper right') # sets legend location legendLocation = kwargs.get('labelLocation',legendLocation) # sets legend location # now run a loop through the input subarrays plt.close('all') # set up here for multiple file output nplot = 1 if multipage == True or multiplot == True: nplot = multilayout[0]*multilayout[1] numpages = int(len(splist) / nplot) + 1 if (len(splist) % nplot == 0): numpages -= 1 fig = [] if multipage == True and filetype == 'pdf': pdf_pages = PdfPages(filename) if multipage == False: if len(splist) > 1: files = [filebase+'{}.'.format(i+1)+filetype for i in numpy.arange(len(splist))] else: files = [filebase+'.'+filetype] pg_n = 0 # page counter plt_n = 0 # plot per page counter lg_n = 0 # legend per plot counter for plts,sp in enumerate(splist): # set specific plot parameters # print(sp[0],Spectrum,isinstance(sp[0],Spectrum)) if not isinstance(sp[0],Spectrum): raise ValueError('\nInput to plotSpectrum has wrong format:\n\n{}\n\n'.format(sp[0])) zeropoint = kwargs.get('zeropoint',[0. 
for x in numpy.arange(len(sp))]) # settings that work if the spectrum was read in as legitmate Spectrum object try: xlabel = kwargs.get('xlabel','{} ({})'.format(sp[0].wlabel,sp[0].wunit)) ylabel = kwargs.get('ylabel','{} {} ({})'.format(sp[0].fscale,sp[0].flabel,sp[0].funit)) except: xlabel = kwargs.get('xlabel','Wavelength (unknown units)') ylabel = kwargs.get('ylabel','Flux (unknown units)') xrange = kwargs.get('xrange',[0.85,2.42]) bound = xrange ymax = [s.fluxMax().value for s in sp] yrng = kwargs.get('yrange',map(lambda x: x*numpy.nanmax(ymax)+numpy.nanmax(zeropoint),[-0.02,1.2])) bound.extend(yrng) linestyle = kwargs.get('linestyle',['steps' for x in numpy.arange(len(sp))]) linestyle = kwargs.get('linestyles',linestyle) if (len(linestyle) < len(sp)): linestyle.extend(['steps' for x in numpy.arange(len(sp)-len(linestyle))]) # colors # by default all black lines colors = kwargs.get('colors',['k' for x in numpy.arange(len(sp))]) colors = kwargs.get('color',colors) if not isinstance(colors,list): colors = [colors] if (len(colors) < len(sp)): while len(colors) < len(sp): colors.extend(colors[-1]) colorScheme = kwargs.get('colorScheme',None) colorScheme = kwargs.get('colorMap',colorScheme) if (colorScheme != None): values = numpy.arange(len(sp)) color_map = plt.get_cmap(colorScheme) norm = colmap.Normalize(vmin=0, vmax=1.0*values[-1]) scalarMap = cm.ScalarMappable(norm=norm, cmap=color_map) for i in numpy.arange(len(sp)): colors[i] = scalarMap.to_rgba(values[i]) colorsUnc = kwargs.get('colorsUnc',colors) colorsUnc = kwargs.get('colorUnc',colorsUnc) if (len(colorsUnc) < len(sp)): while len(colorsUnc) < len(sp): colorsUnc.extend(colors[-1]) # show uncertainties showNoise = kwargs.get('showNoise',[False for x in numpy.arange(len(sp))]) showNoise = kwargs.get('noise',showNoise) showNoise = kwargs.get('uncertainty',showNoise) if not isinstance(showNoise, list): showNoise = [showNoise] if (len(showNoise) < len(sp)): showNoise.extend([True for x in 
numpy.arange(len(sp)-len(showNoise))]) # zero points - by default true showZero = kwargs.get('showZero',[True for x in numpy.arange(len(sp))]) if not isinstance(showZero, list): showZero = [showZero] if (len(showZero) < len(sp)): while len(showZero) < len(sp): showZero.extend(showZero[-1]) # GENERATE PLOTS if (multiplot == True or multipage == True): plt_n = plts % nplot if (plt_n == 0):# and plts != len(splist)): # ax = range(nplot) # t = tuple([tuple([i+b*multilayout[1] for i in range(multilayout[1])]) for b in range(multilayout[0])]) # fig[pg_n], ax = plt.subplots(multilayout[0], multilayout[1], sharex = True, sharey = True) # # NOTE THE FOLLOWING LINE IS HAVING PROBLEMS IN PYTHON3 # # fig.append(plt.figure()) pg_n += 1 ax = fig[pg_n-1].add_subplot(multilayout[0], multilayout[1], plt_n+1) # plotting a single plot with all spectra else: plt.close('all') # ax = range(1) plt_n = 0 fig = [] if (kwargs.get('figsize') != None): fig.append(plt.figure(figsize = kwargs.get('figsize'))) else: fig.append(plt.figure()) ax = fig[0].add_subplot(111) for ii, a in enumerate(sp): flx = [i+zeropoint[ii] for i in a.flux.value] #stack if stack > 0: flx = [f + (len(sp)-ii-1)*stack for f in flx] # if kwargs.get('yrange') == None: # bound[3] = bound[3] + stack ax.plot(a.wave.value,flx,color=colors[ii],linestyle=linestyle[ii], zorder = 10, label = legend[lg_n]) # add comparison if comparison != False: colorComparison = kwargs.get('colorComparison',colors[0]) linestyleComparison = kwargs.get('linestyleComparison',linestyle[0]) cflx = [i+zeropoint[ii] for i in comparison.flux.value] if stack > 0: cflx = [f + (len(sp)-ii-1)*stack for f in cflx] ax.plot(comparison.wave.value,cflx,color=colorComparison,linestyle=linestyleComparison,alpha=0.5, zorder = 10) # add residual if residual == True and len(sp) == 2: # Save flux values from first spectrum if ii == 0: flx0 = [f - (len(sp)-ii-1)*stack for f in flx] # Subtract fluxes and plot elif ii == 1: res = [flx0[f_n] - f for f_n, f in 
enumerate(flx)] ax.plot(a.wave.value, res, alpha = 0.3, color = 'g') # Fix bound[2] if res goes below 0 if min(res) < 0: b0 = numpy.argmax(a.wave.value > bound[0]) b1 = numpy.argmin(a.wave.value < bound[1]) bound[2] = bound[2] + min(res[b0:b1]) # noise if (showNoise[ii]): ns = [i+zeropoint[ii] for i in a.noise.value] ax.plot(a.wave.value,ns,color=colorsUnc[ii],linestyle=linestyle[ii],alpha=0.3, zorder = 10) # zeropoint if (showZero[ii]): ze = numpy.ones(len(a.flux))*zeropoint[ii] ax.plot(a.wave.value,ze,color=colors[ii],linestyle=':',alpha=0.3, zorder = 10) # determine maximum flux for all spectra f = interp1d(a.wave,flx,bounds_error=False,fill_value=0.) if (ii == 0): wvmax = numpy.arange(bound[0],bound[1],0.001) flxmax = f(wvmax) else: flxmax = numpy.maximum(flxmax,f(wvmax)) # legend counter lg_n = lg_n + 1 # Increment lg_n # label features # THIS NEEDS TO BE FIXED WITH GRETEL'S STUFF yoff = 0.02*(bound[3]-bound[2]) fontsize = 10-numpy.min([(multilayout[0]*multilayout[1]-1),6]) for ftr in features: ftr = ftr.lower() if ftr in feature_labels: for ii,waveRng in enumerate(feature_labels[ftr]['wavelengths']): if (numpy.min(waveRng) > bound[0] and numpy.max(waveRng) < bound[1]): x = (numpy.arange(0,nsamples+1.0)/nsamples)* \ (numpy.nanmax(waveRng)-numpy.nanmin(waveRng)+0.04)+numpy.nanmin(waveRng)-0.02 f = interp1d(wvmax,flxmax,bounds_error=False,fill_value=0.) 
y = numpy.nanmax(f(x))+0.5*yoff if feature_labels[ftr]['type'] == 'band': ax.plot(waveRng,[y+yoff]*2,color='k',linestyle='-') ax.plot([waveRng[0]]*2,[y,y+yoff],color='k',linestyle='-') ax.text(numpy.mean(waveRng),y+1.5*yoff,feature_labels[ftr]['label'],horizontalalignment='center',fontsize=fontsize) else: for w in waveRng: ax.plot([w]*2,[y,y+yoff],color='k',linestyle='-') ax.text(numpy.mean(waveRng),y+1.5*yoff,feature_labels[ftr]['label'],horizontalalignment='center',fontsize=fontsize) waveRng = [waveRng[0]-0.02,waveRng[1]+0.02] # for overlap # update offset foff = [y+3*yoff if (w >= waveRng[0] and w <= waveRng[1]) else 0 for w in wvmax] flxmax = [numpy.max([xx,yy]) for xx, yy in zip(flxmax, foff)] bound[3] = numpy.max([numpy.max(flxmax)+1.*yoff,bound[3]]) ax.axis(bound) # grid if (grid): ax.grid() # axis labels fontsize = (13-numpy.min([(multilayout[0]*multilayout[1]-1),8])) * fontscale # Added in fontscale legendfontsize = (13-numpy.min([(multilayout[0]*multilayout[1]-1),8])) * legendfontscale # Added in fontscale ax.set_xlabel(xlabel, fontsize = fontsize) ax.set_ylabel(ylabel, fontsize = fontsize) ax.tick_params(axis='x', labelsize=fontsize) ax.tick_params(axis='y', labelsize=fontsize) # place legend if len(legend) > 0: if legendLocation == 'outside': box = ax.get_position() ax.set_position([box.x0, box.y0 + box.height * 0.15, box.width * 0.7, box.height * 0.7]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':legendfontsize}) else: ax.legend(loc=legendLocation, prop={'size':legendfontsize}) bound[3] = bound[3]+0.1*(bound[3]-bound[2]) # extend axis for in-plot legends ax.axis(bound) # overplot telluric absorption if (kwargs.get('telluric',False) == True): twv = [[1.1,1.2],[1.3,1.5],[1.75,2.0]] for waveRng in twv: rect = patches.Rectangle((waveRng[0],bound[2]),waveRng[1]-waveRng[0],bound[3]-bound[2],facecolor='0.95',alpha=0.2,color='0.95') ax.add_patch(rect) 
ax.text(numpy.mean(waveRng),bound[2]+3*yoff,r'$\oplus$',horizontalalignment='center',fontsize=fontsize) # place inset - RIGHT NOW ONLY SETTING LIMITS WITH FIRST SPECTRUM IN LIST if inset == True and inset_xrange != False: ax_inset = fig[pg_n-1].add_axes(inset_position) #, axisbg='white') bound2 = inset_xrange b0 = numpy.argmax(sp[0].wave.value > bound2[0]) b1 = numpy.argmin(sp[0].wave.value < bound2[1]) bound2.extend([min(sp[0].flux.value[b0:b1]),max(sp[0].flux.value[b0:b1])]) db = (bound2[3]-bound2[2]) bound2[2] = bound2[2]-0.05*db bound2[3] = bound2[3]+0.05*db ax_inset.axis(bound2) inset_fontsize = fontsize*0.7 for ii,a in enumerate(sp): flx = [i+zeropoint[ii] for i in a.flux.value] ax_inset.plot(a.wave.value,flx,color=colors[ii],linestyle=linestyle[ii]) ax_inset.set_xlabel('') ax_inset.set_ylabel('') ax_inset.tick_params(axis='x', labelsize=inset_fontsize) ax_inset.tick_params(axis='y', labelsize=inset_fontsize) # ax_inset.legend() # inset feature labels if inset_features != False: f = interp1d(sp[0].wave,flx,bounds_error=False,fill_value=0.) wvmax = numpy.arange(bound2[0],bound2[1],0.001) flxmax = f(wvmax) yoff = 0.05*(bound2[3]-bound2[2]) for ftr in inset_features: ftr = ftr.lower() if ftr in feature_labels: for ii,waveRng
<reponame>xingchenwan/nasbowl
import collections
import copy
import logging
import random
from copy import deepcopy

import ConfigSpace
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np

from kernels import GraphKernels, WeisfilerLehman
from .gp import GraphGP

# === For NASBench-101 ====
MAX_EDGES = 9  # maximum number of edges allowed in a valid NASBench-101 cell
VERTICES = 7   # fixed number of vertices in a NASBench-101 cell (input + 5 ops + output)
OPS = ['conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3']

# === For NASBench-201 ===
MAX_EDGES_201 = None  # NASBench-201 cells have a fixed topology, so no edge cap applies
VERTICES_201 = None
OPS_201 = ['nor_conv_3x3', 'nor_conv_1x1', 'avg_pool_3x3', 'skip_connect', 'none']


def prune(original_matrix, ops):
    """Prune the extraneous parts of the graph.

    General procedure:
      1) Remove parts of graph not connected to input.
      2) Remove parts of graph not connected to output.
      3) Reorder the vertices so that they are consecutive after steps 1 and 2.

    These 3 steps can be combined by deleting the rows and columns of the
    vertices that are not reachable from both the input and output (in reverse).
    """
    num_vertices = np.shape(original_matrix)[0]
    new_matrix = copy.deepcopy(original_matrix)
    new_ops = copy.deepcopy(ops)

    # DFS forward from input (vertex 0), following upper-triangular edges only.
    visited_from_input = {0}
    frontier = [0]
    while frontier:
        top = frontier.pop()
        for v in range(top + 1, num_vertices):
            if original_matrix[top, v] and v not in visited_from_input:
                visited_from_input.add(v)
                frontier.append(v)

    # DFS backward from output (last vertex).
    visited_from_output = {num_vertices - 1}
    frontier = [num_vertices - 1]
    while frontier:
        top = frontier.pop()
        for v in range(0, top):
            if original_matrix[v, top] and v not in visited_from_output:
                visited_from_output.add(v)
                frontier.append(v)

    # Any vertex that isn't connected to both input and output is extraneous to
    # the computation graph.
    extraneous = set(range(num_vertices)).difference(
        visited_from_input.intersection(visited_from_output))

    # If the non-extraneous graph is less than 2 vertices, the input is not
    # connected to the output and the spec is invalid.
    if len(extraneous) > num_vertices - 2:
        new_matrix = None
        new_ops = None
        valid_spec = False  # NOTE(review): assigned but never used — looks vestigial; confirm before removing
        # NOTE(review): bare return yields None; mutate_arch relies on the
        # resulting tuple-unpack TypeError being caught to retry the mutation.
        return

    new_matrix = np.delete(new_matrix, list(extraneous), axis=0)
    new_matrix = np.delete(new_matrix, list(extraneous), axis=1)
    for index in sorted(extraneous, reverse=True):
        del new_ops[index]
    return new_matrix, new_ops


def get_nas101_configuration_space():
    """Build the ConfigSpace search space for NASBench-101.

    One categorical hyperparameter per operation node (5 of them) plus one
    binary hyperparameter per possible upper-triangular edge of the 7-vertex
    adjacency matrix.
    """
    nas101_cs = ConfigSpace.ConfigurationSpace()
    nas101_cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("op_node_0", OPS))
    nas101_cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("op_node_1", OPS))
    nas101_cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("op_node_2", OPS))
    nas101_cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("op_node_3", OPS))
    nas101_cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("op_node_4", OPS))
    for i in range(VERTICES * (VERTICES - 1) // 2):
        nas101_cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("edge_%d" % i, [0, 1]))
    return nas101_cs


# === For NASBench-201 ====
def create_nasbench201_graph(op_node_labelling, edge_attr=False):
    """Build a networkx DiGraph for a NASBench-201 cell.

    op_node_labelling: list of 6 operation names (one per cell edge/op).
    edge_attr: if True, encode operations as edge attributes on a 4-node
        graph; otherwise encode them as node attributes on an 8-node graph
        (input + 6 ops + output), pruning 'none'/'skip_connect' nodes.
    Returns the graph with .name set to the NASBench-201 arch query string.
    """
    assert len(op_node_labelling) == 6
    # the graph has 8 nodes (6 operation nodes + input + output)
    G = nx.DiGraph()
    if edge_attr:
        edge_list = [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3), (2, 3)]
        G.add_edges_from(edge_list)
        edge_attribute = {}
        remove_edge_list = []
        for i, edge in enumerate(edge_list):
            edge_attribute[edge] = {'op_name': op_node_labelling[i]}
            if op_node_labelling[i] == 'none':
                remove_edge_list.append(edge)
        nx.set_edge_attributes(G, edge_attribute)
        G.remove_edges_from(remove_edge_list)

        # after removal, some op nodes have no input nodes and some have no output nodes
        # --> remove these redundant nodes
        nodes_to_be_further_removed = []
        for n_id in G.nodes():
            in_edges = G.in_edges(n_id)
            out_edges = G.out_edges(n_id)
            # node 0 is the input, node 3 the output: never remove those
            if n_id != 0 and len(in_edges) == 0:
                nodes_to_be_further_removed.append(n_id)
            elif n_id != 3 and len(out_edges) == 0:
                nodes_to_be_further_removed.append(n_id)

        G.remove_nodes_from(nodes_to_be_further_removed)
        # Assign dummy variables as node attributes:
        for i in G.nodes:
            G.nodes[i]['op_name'] = "1"
        G.graph_type = 'edge_attr'
    else:
        edge_list = [(0, 1), (0, 2), (0, 4), (1, 3), (1, 5), (2, 6), (3, 6), (4, 7), (5, 7), (6, 7)]
        G.add_edges_from(edge_list)

        # assign node attributes and collate the information for nodes to be removed
        # (i.e. nodes with 'skip_connect' or 'none' label)
        node_labelling = ['input'] + op_node_labelling + ['output']
        nodes_to_remove_list = []
        remove_nodes_list = []
        edges_to_add_list = []
        for i, n in enumerate(node_labelling):
            G.nodes[i]['op_name'] = n
            if n == 'none' or n == 'skip_connect':
                input_nodes = [edge[0] for edge in G.in_edges(i)]
                output_nodes = [edge[1] for edge in G.out_edges(i)]
                nodes_to_remove_info = {'id': i, 'input_nodes': input_nodes, 'output_nodes': output_nodes}
                nodes_to_remove_list.append(nodes_to_remove_info)
                remove_nodes_list.append(i)

                if n == 'skip_connect':
                    for n_i in input_nodes:
                        edges_to_add = [(n_i, n_o) for n_o in output_nodes]
                        edges_to_add_list += edges_to_add

        # reconnect edges for removed nodes with 'skip_connect'
        G.add_edges_from(edges_to_add_list)

        # remove nodes with 'skip_connect' or 'none' label
        G.remove_nodes_from(remove_nodes_list)

        # after removal, some op nodes have no input nodes and some have no output nodes
        # --> remove these redundant nodes
        nodes_to_be_further_removed = []
        for n_id in G.nodes():
            in_edges = G.in_edges(n_id)
            out_edges = G.out_edges(n_id)
            # node 0 is the input, node 7 the output: never remove those
            if n_id != 0 and len(in_edges) == 0:
                nodes_to_be_further_removed.append(n_id)
            elif n_id != 7 and len(out_edges) == 0:
                nodes_to_be_further_removed.append(n_id)

        G.remove_nodes_from(nodes_to_be_further_removed)
        G.graph_type = 'node_attr'

    # create the arch string for querying nasbench dataset
    arch_query_string = f'|{op_node_labelling[0]}~0|+' \
                        f'|{op_node_labelling[1]}~0|{op_node_labelling[2]}~1|+' \
                        f'|{op_node_labelling[3]}~0|{op_node_labelling[4]}~1|{op_node_labelling[5]}~2|'

    G.name = arch_query_string
    return G


def get_nas201_configuration_space():
    """Build the ConfigSpace search space for NASBench-201 (unpruned graph)."""
    # for unpruned graph
    cs = ConfigSpace.ConfigurationSpace()
    ops_choices = ['nor_conv_3x3', 'nor_conv_1x1', 'avg_pool_3x3', 'skip_connect', 'none']
    for i in range(6):
        cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter("edge_%d" % i, ops_choices))
    return cs


# Regularised evolution to generate new graphs
def mutate_arch(parent_arch, benchmark, return_unpruned_arch=True):
    """Mutate one architecture graph into a new child architecture.

    parent_arch: networkx graph of the parent architecture.
    benchmark: 'nasbench101' or 'nasbench201' — selects the mutation scheme.
    return_unpruned_arch: also return the child before pruning (NB101 only;
        for NB201 the unpruned child is the child itself).
    Returns (child_arch, child_arch_unpruned) or (child_arch, None).
    """
    if benchmark == 'nasbench101':
        # get parent_arch node label and adjacency matrix
        child_arch = deepcopy(parent_arch)
        node_labeling = list(nx.get_node_attributes(child_arch, 'op_name').values())
        adjacency_matrix = np.array(nx.adjacency_matrix(child_arch).todense())
        # NOTE(review): the two parent_* copies below appear unused in the
        # visible code — confirm against the rest of the file before removing.
        parent_node_labeling = deepcopy(node_labeling)
        parent_adjacency_matrix = deepcopy(adjacency_matrix)

        dim_op_labeling = (len(node_labeling) - 2)  # number of mutable op nodes (excl. input/output)
        dim_adjacency_matrix = adjacency_matrix.shape[0] * (adjacency_matrix.shape[0] - 1) // 2

        mutation_failed = True
        while mutation_failed:
            # pick random parameter
            dim = np.random.randint(dim_op_labeling + dim_adjacency_matrix)
            if dim < dim_op_labeling:
                # mutate an operation label
                choices = ['conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3']
                node_number = int(dim + 1)
                parent_choice = node_labeling[node_number]
                # drop current values from potential choices
                choices.remove(parent_choice)
                # flip parameter
                choice_idx = np.random.randint(len(choices))
                node_labeling[node_number] = choices[choice_idx]
            else:
                # mutate (toggle) an edge
                choices = [0, 1]
                # find the corresponding row and colum in adjacency matrix
                idx = np.triu_indices(adjacency_matrix.shape[0], k=1)
                edge_i = int(dim - dim_op_labeling)
                row = idx[0][edge_i]
                col = idx[1][edge_i]
                parent_choice = adjacency_matrix[row, col]
                # drop current values from potential choices
                choices.remove(parent_choice)
                # flip parameter
                choice_idx = np.random.randint(len(choices))
                adjacency_matrix[row, col] = choices[choice_idx]
            try:
                pruned_adjacency_matrix, pruned_node_labeling = prune(adjacency_matrix, node_labeling)
                mutation_failed = False
            # NOTE(review): bare except — it intentionally catches the
            # TypeError from unpacking prune()'s None on an invalid spec,
            # but it would also hide genuine bugs; consider narrowing.
            except:
                continue
        child_arch = nx.from_numpy_array(pruned_adjacency_matrix, create_using=nx.DiGraph)
        for i, n in enumerate(pruned_node_labeling):
            child_arch.nodes[i]['op_name'] = n
        if return_unpruned_arch:
            child_arch_unpruned = nx.from_numpy_array(adjacency_matrix, create_using=nx.DiGraph)
            for i, n in enumerate(node_labeling):
                child_arch_unpruned.nodes[i]['op_name'] = n
    elif benchmark == 'nasbench201':
        # rebuild the 6 op labels from the arch string stored in graph.name
        arch_str_list = parent_arch.name.split('|')
        op_label_rebuild = []
        for str_i in arch_str_list:
            if '~' in str_i:
                op_label_rebuild.append(str_i[:-2])
        # pick random parameter
        dim = np.random.randint(len(op_label_rebuild))
        parent_choice = op_label_rebuild[dim]
        # drop current values from potential choices
        ops_choices = OPS_201[:]
        ops_choices.remove(parent_choice)
        # flip parameter
        choice_idx = np.random.randint(len(ops_choices))
        op_label_rebuild[dim] = ops_choices[choice_idx]
        child_arch = create_nasbench201_graph(op_label_rebuild, edge_attr=parent_arch.graph_type == 'edge_attr')
        if return_unpruned_arch:
            # NB201 mutation operates on the already-pruned representation
            child_arch_unpruned = child_arch
    if return_unpruned_arch:
        return child_arch, child_arch_unpruned
    return child_arch, None


def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))


def regularized_evolution(acquisition_func, observed_archs, observed_archs_unpruned=None,
                          benchmark='nasbench101',
                          pool_size=200, cycles=40, n_mutation=10, batch_size=1, mutate_unpruned_arch=True):
    """Algorithm for regularized evolution (i.e. aging evolution).

    Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image
    Classifier Architecture Search".
    """
    # NOTE(review): batch_size is not referenced in the visible portion of
    # this function — verify its use in the (truncated) remainder.
    # Generate some random archs into the evaluation pool
    if mutate_unpruned_arch and observed_archs_unpruned is None:
        raise ValueError("When mutate_unpruned_arch option is toggled on, you need to supplied the list of unpruned "
                         "observed architectures.")
    if observed_archs_unpruned is not None:
        assert len(observed_archs_unpruned) == len(observed_archs), " unequal length between the pruned/unpruned " \
                                                                    "architecture lists"
    n_random_archs = pool_size - len(observed_archs)
    if mutate_unpruned_arch:
        # random_sampling is defined elsewhere in this module (not visible here)
        (random_archs, _, random_archs_unpruned) = random_sampling(pool_size=n_random_archs, benchmark=benchmark,
                                                                   return_unpruned_archs=True)
        population_unpruned = observed_archs_unpruned + random_archs_unpruned
    else:
        (random_archs, _, _) = random_sampling(pool_size=n_random_archs, benchmark=benchmark, )
        population_unpruned = None
    population = observed_archs + random_archs

    # Fill the population with the observed archs (a list of labelled graphs) and validation error
    population_performance = []
    for i, archs in enumerate(population):
        arch_acq = acquisition_func.eval(archs, asscalar=True)
        population_performance.append(arch_acq)

    # Carry out evolution in cycles. Each cycle produces a new model and removes another.
    k_cycle = 0
    while k_cycle < cycles:
        # Sample randomly chosen models from the current population based on the acquisition function values
        pseudo_prob = np.array(population_performance) / (np.sum(population_performance))
        if mutate_unpruned_arch:
            samples = random.choices(population_unpruned, weights=pseudo_prob, k=30)
            sample_indices = [population_unpruned.index(s) for s in samples]
        else:
            samples = random.choices(population, weights=pseudo_prob, k=30)
            sample_indices = [population.index(s) for s in samples]
        sample_performance = [population_performance[idx] for idx in sample_indices]

        # The parents is the best n_mutation model in the sample. skip 2-node archs
        top_n_mutation_archs_indices = np.argsort(sample_performance)[-n_mutation:]  # argsort>ascending
        parents_archs = [samples[idx] for idx in top_n_mutation_archs_indices if len(samples[idx].nodes) > 3]

        # Create the child model and store it.
        for parent in parents_archs:
            child, child_unpruned = mutate_arch(parent, benchmark)
            # skip invalid architectures whose number of edges exceed the max limit of 9
            if np.sum(nx.to_numpy_array(child)) > MAX_EDGES:
                continue
            # skip children identical to their parent or to any existing member
            if iso.is_isomorphic(child, parent):
                continue
            skip = False
            for prev_edit in population:
                if iso.is_isomorphic(child, prev_edit, ):
                    skip = True
                    break
            if skip:
                continue
            child_arch_acq = acquisition_func.eval(child, asscalar=True)
            population.append(child)
            if mutate_unpruned_arch:
                population_unpruned.append(child_unpruned)
            population_performance.append(child_arch_acq)
        # Remove the worst performing model and
# Limit-order management for one market inside the trading universe.
# NOTE(review): module uses Python 2 idioms (`reload`, `.next()`, `iteritems`)
# throughout; they are preserved as-is.
import tia.trad.tools.ipc.naming_conventions as names
import tia.trad.tools.arithm.floatArithm as fl
import tia.trad.market.orders as orders; reload(orders)
from tia.trad.tools.dicDiff import DictDiff
import tia.trad.market.events as event; reload(event)
import logging
import tia.trad.tools.ipc.processLogger as pl

LOGGER_NAME = pl.PROCESS_NAME + __file__.split("/")[-1]
logger = logging.getLogger(LOGGER_NAME)


def cancelOrder(_Order, _ChangedMarket):
    """Emit a cancel event for *_Order* on *_ChangedMarket*."""
    try:
        logger.debug("cancelOrder: hi")
        _ChangedMarket.sendEvent(
            event.onCancelOrder(names.orderCancel, _ChangedMarket.name,
                                _Order.oid, _Order.type, _Order))
    except Exception:
        raise


def placeOrder(_Order, _foundTargets, _ChangedMarket):
    """Quantize price/amount to market precision and emit a place event."""
    try:
        logger.debug("placeOrder: hi")
        price = _Order.price.quantize(_ChangedMarket.pip)
        amount = _Order.amount.quantize(_ChangedMarket.pipAmount)
        dummyOid = 123  # placeholder; the real oid is assigned downstream
        _ChangedMarket.sendEvent(
            event.onPlaceOrder(names.orderPlace, _ChangedMarket.name, price,
                               amount, _Order.type, _Order.datePlaced,
                               _foundTargets, dummyOid))
    except Exception:
        raise


def replaceOrder(_Order, _eventDate, _priorityExecutionPrice, _newAmount,
                 _foundTargets, _ChangedMarket):
    """Cancel and re-place *_Order* only when price or amount changed."""
    try:
        logger.debug("replaceOrder: hi")
        if _Order.price != _priorityExecutionPrice or _Order.amount != _newAmount:
            cancelOrder(_Order, _ChangedMarket)
            newOrder = orders.Order(_ChangedMarket.name,
                                    _priorityExecutionPrice, _newAmount,
                                    _Order.type, _eventDate)
            placeOrder(newOrder, _foundTargets, _ChangedMarket)
    except Exception:
        raise


def replaceOnL2Change(_changedSide, _ChangedMarket, _UniverseD):
    """Cancel the current active order and replace it tighter to L2."""
    try:
        logger.debug("replaceOnL2Change: hi")
        foundTargets = _findTargets(_changedSide, _ChangedMarket, _UniverseD)
        # Simulate our own action instantly so the priority price is computed
        # against the book without our own top order: drop the top level.
        if _changedSide == names.asks:
            AO = _ChangedMarket.activeOrders.asks
            l1ask = _ChangedMarket.orderbook.asks.smallest_key()
            del _ChangedMarket.orderbook.asks[l1ask]
        elif _changedSide == names.bids:
            AO = _ChangedMarket.activeOrders.bids
            l1bid = _ChangedMarket.orderbook.bids.largest_key()
            del _ChangedMarket.orderbook.bids[l1bid]
        else:
            raise Exception("unknown side: %s" % _changedSide)
        [newAmount, newPriorityExecutionPrice] = getNewAmount(
            _changedSide, foundTargets, _ChangedMarket)
        if newAmount:
            replaceOrder(AO, AO.datePlaced, newPriorityExecutionPrice,
                         newAmount, foundTargets, _ChangedMarket)
    except Exception:
        raise


def getPriorityExecutionPrice(_changedSide, _topOrderPrice, _largestBid,
                              _smallestAsk, _ChangedMarket):
    """Return a one-pip price improvement over the current top order.

    Don't overbid our own orders: if the top price is already ours,
    replaceOrder will not replace it unless the amount changes.
    """
    try:
        logger.debug("getPriorityExecutionPrice: hi")
        pip = _ChangedMarket.pip
        if _changedSide == names.bids:
            priorityExecutionPrice = _topOrderPrice + pip
            AO = _ChangedMarket.activeOrders.bids
            if AO:
                if AO.price == _largestBid:
                    priorityExecutionPrice = AO.price
        elif _changedSide == names.asks:
            priorityExecutionPrice = _topOrderPrice - pip
            AO = _ChangedMarket.activeOrders.asks
            if AO:
                if AO.price == _smallestAsk:
                    priorityExecutionPrice = AO.price
        else:
            raise Exception("Unknown side: %s" % _changedSide)
        return priorityExecutionPrice
    except Exception:
        raise


def _findTargets(_changedSide, _ChangedMarket, _UniverseD):
    """Collect exit opportunities from every limit-order strategy."""
    try:
        logger.debug("_findTargets: hi")
        foundTargets = {}
        for strategy in _ChangedMarket.activeStrategies.emitsLimitOrder.values():
            retDct = strategy.findExits(_changedSide, _ChangedMarket, _UniverseD)
            foundTargets.update(retDct)
        return foundTargets
    except Exception:
        raise


def getNewAmount(_changedSide, _foundTargets, _ChangedMarket):
    """Return ``[newAmount, priorityExecutionPrice]`` for the current market.

    All amounts are expressed in items; bid-side fund sums are converted.
    """
    try:
        logger.debug("getNewAmount: hi")
        # amount quote from the target manager
        targetManagerQuote = _ChangedMarket.targetManager.targetsAmountSum(
            _changedSide, _ChangedMarket)
        topOrderPrice = targetManagerQuote[0]
        # price for priority execution on the current market
        onePip = _ChangedMarket.pip
        smallestAsk = _ChangedMarket.orderbook.asks.smallest_key()
        largestBid = _ChangedMarket.orderbook.bids.largest_key()
        # if our order is at the top, the priority price equals the top, so
        # the order gets replaced only when the amount changes
        if smallestAsk - largestBid > onePip:
            priorityExecutionPrice = getPriorityExecutionPrice(
                _changedSide, topOrderPrice, largestBid, smallestAsk,
                _ChangedMarket)
        else:
            priorityExecutionPrice = (largestBid if _changedSide == names.bids
                                      else smallestAsk)
        availItems = _ChangedMarket.account.getResources(
            _changedSide, priorityExecutionPrice, _ChangedMarket)
        maxSaturationAtSomePrice = 2
        TMamountSum = targetManagerQuote[1]
        # in bids, convert the returned funds into items
        if _changedSide == names.bids:
            TMamountSum = (TMamountSum / priorityExecutionPrice).quantize(
                _ChangedMarket.pip)
        # always keep some funds available to realize the TM
        if availItems <= _ChangedMarket.minAllowableBet:
            newAmount = fl.D("0")  # keep some funds to realize TM
        # TM saturation: wait for TM to realize this amount first
        # elif amountSum > maxSaturationAtSomePrice:
        #     newAmount = min([amountSum, availItems])
        else:
            # place a bet in addition to the TM realization amount
            minBet = fl.D("1") if availItems > 10 else fl.D("0.1")
            foundExitsBet = min([TMamountSum + minBet, availItems])
            newAmount = (foundExitsBet if _foundTargets
                         else min([TMamountSum, availItems]))
        logger.debug("newAm: %s, priorityPrice: %s"
                     % (newAmount, priorityExecutionPrice))
        if newAmount <= _ChangedMarket.minAllowableBet:
            newAmount = 0
        return [newAmount, priorityExecutionPrice]
    except Exception:
        raise


def getAmountForMarketOrder(_changedSide, _ChangedMarket):
    """Quote a market order that crosses a target-manager price level."""
    try:
        logger.debug("getAmountForMarketOrder: hi")
        res_ = {"newAmount": None, "changedSideAO": None}
        exchangeRate = _ChangedMarket.exchangeRates[_ChangedMarket.currency]
        targetSide = names.bids if _changedSide == names.asks else names.asks
        if _changedSide == names.bids:
            AO = _ChangedMarket.activeOrders.bids
            opposedAO = _ChangedMarket.activeOrders.asks
            # try to realize items
            try:
                target = _ChangedMarket.targetManager.targetItems.smallest_item()
                targetPrice = target[0]
                targetAmount = target[1]
            except KeyError:
                target = None
            if target:
                topOrderIter = _ChangedMarket.orderbook.bids.iteritems(reverse=True)
                topOrder = topOrderIter.next()
                topOrderPrice = topOrder[0]
                topOrderAmount = topOrder[1]
                # if the top order is ours, look one level deeper
                try:
                    if AO.price == topOrderPrice:
                        topOrder = topOrderIter.next()
                        topOrderPrice = topOrder[0]
                        topOrderAmount = topOrder[1]
                        res_["changedSideAO"] = AO
                # AO might be behind the top order, so check the top order
                except AttributeError:
                    pass  # AO might not exist
                finally:
                    # check if it crosses
                    if topOrderPrice * exchangeRate + 10 * _ChangedMarket.pip > targetPrice:
                        logger.debug("topOrderUSD:%s, target:%s"
                                     % (topOrderPrice * exchangeRate, targetPrice))
                        availItems = _ChangedMarket.account.getResources(
                            targetSide, topOrderPrice, _ChangedMarket)
                        amount = min([targetAmount, topOrderAmount, availItems])
                        res_["newAmount"] = amount
                        res_["priorityExecutionPrice"] = topOrderPrice
                        res_["side"] = targetSide
                        res_["oppositeAO"] = opposedAO
        elif _changedSide == names.asks:
            AO = _ChangedMarket.activeOrders.asks
            opposedAO = _ChangedMarket.activeOrders.bids
            # try to realize funds
            try:
                target = _ChangedMarket.targetManager.targetFunds.largest_item()
                targetPrice = target[0]
                targetAmount = target[1]
            except KeyError:
                target = None
            if target:
                topOrderIter = _ChangedMarket.orderbook.asks.iteritems()
                topOrder = topOrderIter.next()
                topOrderPrice = topOrder[0]
                topOrderAmount = topOrder[1]
                # if the top order is ours, look one level deeper
                try:
                    if AO.price == topOrderPrice:
                        topOrder = topOrderIter.next()
                        topOrderPrice = topOrder[0]
                        topOrderAmount = topOrder[1]
                        res_["changedSideAO"] = AO
                except AttributeError:
                    pass  # AO might not exist
                finally:
                    # check if it crosses
                    if topOrderPrice * exchangeRate - 10 * _ChangedMarket.pip < targetPrice:
                        logger.debug("topOrderUSD:%s, target:%s"
                                     % (topOrderPrice * exchangeRate, targetPrice))
                        availItems = _ChangedMarket.account.getResources(
                            targetSide, topOrderPrice, _ChangedMarket)
                        amount = min([targetAmount, topOrderAmount, availItems])
                        res_["newAmount"] = amount
                        res_["priorityExecutionPrice"] = topOrderPrice
                        res_["side"] = targetSide
                        res_["oppositeAO"] = opposedAO
        else:
            raise Exception("unknown side: %s" % _changedSide)
        logger.debug("getAmountForMarketOrder: %s" % res_)
        return res_
    except Exception:
        raise


def _manageCurrentMarket(_eventDate, _changedSide, _foundTargets, _ChangedMarket):
    """Place, replace or cancel the active order on the changed market."""
    try:
        logger.debug("_manageCurrentMarket: hi")
        [newAmount, priorityExecutionPrice] = getNewAmount(
            _changedSide, _foundTargets, _ChangedMarket)
        # check if we have to alter an order already in the market
        activeOrder = (_ChangedMarket.activeOrders.bids
                       if _changedSide == names.bids
                       else _ChangedMarket.activeOrders.asks)
        logger.debug("activeOrder: %s" % activeOrder)
        if activeOrder:
            if newAmount:
                replaceOrder(activeOrder, _eventDate, priorityExecutionPrice,
                             newAmount, _foundTargets, _ChangedMarket)
            # newAmount can be 0
            else:
                cancelOrder(activeOrder, _ChangedMarket)
        else:
            if newAmount:
                newOrder = orders.Order(_ChangedMarket.name,
                                        priorityExecutionPrice, newAmount,
                                        _changedSide, _eventDate)
                placeOrder(newOrder, _foundTargets, _ChangedMarket)
    except Exception:
        raise


def _manageFoundTargets(_eventDate, _changedSide, _foundTargets,
                        _ChangedMarket, _UniverseD):
    """ manage exposure: expose or change activeOrder """
    try:
        logger.debug("_manageFoundTargets: hi")
        targetSide = names.asks if _changedSide == names.bids else names.bids
        for targetMarketname in _foundTargets:
            TargetMarket = _UniverseD[targetMarketname]
            # our own market is already managed elsewhere
            if TargetMarket != _ChangedMarket:
                _manageCurrentMarket(_eventDate, targetSide, _foundTargets,
                                     TargetMarket)
    except Exception:
        raise


def _manageLostTargets(_eventDate, _changedSide, _foundTargets,
                       _ChangedMarket, _UniverseD):
    """Handle markets that lost their signal from the changed market."""
    try:
        logger.debug("_manageLostTargets: hi")
        # HANDLE MARKETS WITH NO SIGNAL: if an order still sees a target
        # after signal loss from our market, leave it alone; otherwise size
        # it from the target manager only.
        lostSignal = DictDiff(_foundTargets, _UniverseD).removed()
        for marketName in lostSignal:
            LostMarket = _UniverseD[marketName]
            if LostMarket != _ChangedMarket:  # changedMarket handled already
                if _changedSide == names.bids:
                    activeOrder = LostMarket.activeOrders.asks
                    targetSide = names.asks
                elif _changedSide == names.asks:
                    activeOrder = LostMarket.activeOrders.bids
                    targetSide = names.bids
                else:
                    raise Exception("unknown side: %s" % _changedSide)
                if activeOrder:
                    # remove our market from targets since no more signal
                    try:
                        del activeOrder.targets[_ChangedMarket.name]
                    except KeyError:
                        pass
                    if activeOrder.targets:
                        pass  # has other targets, leave it alone
                    else:
                        # no more targets -> no bets, just empty the TM:
                        # take the amount from the target manager
                        targetManagerQuote = LostMarket.targetManager.targetsAmountSum(
                            targetSide, LostMarket)
                        amountSum = targetManagerQuote[1]
                        newAmount = min([
                            amountSum, LostMarket.minAllowableBet,
                            LostMarket.account.getResources(
                                targetSide, activeOrder.price, LostMarket)])
                        # check if the order has to be replaced
                        if newAmount:
                            replaceOrder(activeOrder, _eventDate,
                                         activeOrder.price, newAmount, {},
                                         LostMarket)
                        else:
                            cancelOrder(activeOrder, LostMarket)
    except Exception:
        raise


def manageUniverse(_eventDate, _ChangedMarket, _UniverseD):
    """React to an orderbook change on *_ChangedMarket* across the universe."""
    try:
        logger.debug("\nmanageUniverse: hi")
        """
        #check if TM can try to empty itself with a market order
        marketQuote = _MarketOfChange.targetManager.emitMarketQuote(side, _MarketOfChange)
        if marketQuote: pass #marketOrderProcedure -
        else:
        #TODO handle marketOrder Strategies
        # recognize opportunity for a market order
        #for strategy in _MarketOfChange.activeStrategies.emitsMarketOrder:
        #    marketQuote = strategy.returnMarketQuote(side, _MarketOfChange, _MarketsOtherD) # get marketQuote
        if marketQuote: pass #marketOrderProcedure -
        # recognize opportunity for limit order in this Market
        else:
        """
        l1askChanged = _ChangedMarket.filters.minAskChanged
        l1bidChanged = _ChangedMarket.filters.maxBidChanged
        AOA = _ChangedMarket.activeOrders.asks
        AOB = _ChangedMarket.activeOrders.bids
        l2askChanged = AOA and _ChangedMarket.filters.secondaryAskChanged
        l2bidChanged = AOB and _ChangedMarket.filters.secondaryBidChanged
        l1b = _ChangedMarket.orderbook.bids.largest_item()
        l1a = _ChangedMarket.orderbook.asks.smallest_item()
        L1 = "L1changed"; L2 = "L2changed"; ActiveOrder = "ActiveOrder"; topOrder = "topOrder"
        d = {names.bids: {L1: l1bidChanged, L2: l2bidChanged,
                          ActiveOrder: AOB, topOrder: l1b},
             names.asks: {L1: l1askChanged, L2: l2askChanged,
                          ActiveOrder: AOA, topOrder: l1a}}
        # check for market orders
        foundMO = 0
        """
        for changedSide in d:
            marketOrderD = getAmountForMarketOrder(changedSide, _ChangedMarket)
            amount = marketOrderD["newAmount"]
            if amount > _ChangedMarket.minAllowableBet:
                foundMO = 1
                changedSideAO = marketOrderD["changedSideAO"]
                if changedSideAO:
                    cancelOrder(changedSideAO, _ChangedMarket)
                newOrder = orders.Order(_ChangedMarket.name,
                                        marketOrderD["priorityExecutionPrice"],
                                        marketOrderD["newAmount"],
                                        marketOrderD["side"], _eventDate)
                AO = marketOrderD["oppositeAO"]
                if AO:
                    cancelOrder(AO, _ChangedMarket)
                placeOrder(newOrder, {}, _ChangedMarket)
        """
        # limit orders
        # NOTE(review): the source chunk is truncated here; the rest of this
        # function is not visible. The closing handler below is reconstructed
        # to match the module-wide pattern — confirm against the original.
    except Exception:
        raise
r'h_\mathrm{in,1}\right)+ kA \cdot \frac{T_\mathrm{out,1} - ' r'T_\mathrm{in,2} - T_\mathrm{in,1} + T_\mathrm{out,2}}' r'{\ln{\frac{T_\mathrm{out,1} - T_\mathrm{in,2}}' r'{T_\mathrm{in,1} - T_\mathrm{out,2}}}}' ) return generate_latex_eq(self, latex, label) def kA_deriv(self, increment_filter, k): r""" Partial derivatives of heat transfer coefficient function. Parameters ---------- increment_filter : ndarray Matrix for filtering non-changing variables. k : int Position of derivatives in Jacobian matrix (k-th equation). """ f = self.kA_func self.jacobian[k, 0, 0] = self.outl[0].h.val_SI - self.inl[0].h.val_SI for i in range(4): if not increment_filter[i, 1]: self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i) if not increment_filter[i, 2]: self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i) def kA_char_func(self): r""" Calculate heat transfer from heat transfer coefficient characteristic. Returns ------- residual : float Residual value of equation. .. math:: 0 = \dot{m}_{in,1} \cdot \left( h_{out,1} - h_{in,1}\right) + kA_{design} \cdot f_{kA} \cdot \frac{T_{out,1} - T_{in,2} - T_{in,1} + T_{out,2}} {\ln{\frac{T_{out,1} - T_{in,2}}{T_{in,1} - T_{out,2}}}} f_{kA} = \frac{2}{\frac{1}{f_1\left( expr_1\right)} + \frac{1}{f_2\left( expr_2\right)}} Note ---- For standard functions f\ :subscript:`1` \ and f\ :subscript:`2` \ see module :py:mod:`tespy.data`. 
""" p1 = self.kA_char1.param p2 = self.kA_char2.param f1 = self.get_char_expr(p1, **self.kA_char1.char_params) f2 = self.get_char_expr(p2, **self.kA_char2.char_params) i1 = self.inl[0] i2 = self.inl[1] o1 = self.outl[0] o2 = self.outl[1] # temperature value manipulation for convergence stability T_i1 = T_mix_ph(i1.get_flow(), T0=i1.T.val_SI) T_i2 = T_mix_ph(i2.get_flow(), T0=i2.T.val_SI) T_o1 = T_mix_ph(o1.get_flow(), T0=o1.T.val_SI) T_o2 = T_mix_ph(o2.get_flow(), T0=o2.T.val_SI) if T_i1 <= T_o2: T_i1 = T_o2 + 0.01 if T_i1 <= T_o2: T_o2 = T_i1 - 0.01 if T_i1 <= T_o2: T_o1 = T_i2 + 0.02 if T_o1 <= T_i2: T_i2 = T_o1 - 0.02 td_log = ((T_o1 - T_i2 - T_i1 + T_o2) / np.log((T_o1 - T_i2) / (T_i1 - T_o2))) fkA1 = self.kA_char1.char_func.evaluate(f1) fkA2 = self.kA_char2.char_func.evaluate(f2) fkA = 2 / (1 / fkA1 + 1 / fkA2) return ( i1.m.val_SI * (o1.h.val_SI - i1.h.val_SI) + self.kA.design * fkA * td_log) def kA_char_func_doc(self, label): r""" Calculate heat transfer from heat transfer coefficient characteristic. Parameters ---------- label : str Label for equation. Returns ------- latex : str LaTeX code of equations applied. """ latex = ( r'\begin{split}' + '\n' r'0 = & \dot{m}_\mathrm{in,1} \cdot \left( h_\mathrm{out,1} - ' r'h_\mathrm{in,1}\right)\\' + '\n' r'&+kA_\mathrm{design} \cdot ' r'f_\mathrm{kA} \cdot \frac{T_\mathrm{out,1} - T_\mathrm{in,2}' r' - T_\mathrm{in,1} + T_\mathrm{out,2}}{\ln{' r'\frac{T_\mathrm{out,1} - T_\mathrm{in,2}}{T_\mathrm{in,1} -' r' T_\mathrm{out,2}}}}\\' + '\n' r'f_\mathrm{kA}=&\frac{2}{\frac{1}{f\left(X_1\right)}+' r'\frac{1}{f\left(X_2\right)}}\\' + '\n' r'\end{split}' ) return generate_latex_eq(self, latex, label) def kA_char_deriv(self, increment_filter, k): r""" Partial derivatives of heat transfer coefficient characteristic. Parameters ---------- increment_filter : ndarray Matrix for filtering non-changing variables. k : int Position of derivatives in Jacobian matrix (k-th equation). 
""" f = self.kA_char_func if not increment_filter[0, 0]: self.jacobian[k, 0, 0] = self.numeric_deriv(f, 'm', 0) if not increment_filter[1, 0]: self.jacobian[k, 1, 0] = self.numeric_deriv(f, 'm', 1) for i in range(4): if not increment_filter[i, 1]: self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i) if not increment_filter[i, 2]: self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i) def ttd_u_func(self): r""" Equation for upper terminal temperature difference. Returns ------- residual : float Residual value of equation. .. math:: 0 = ttd_{u} - T_{in,1} + T_{out,2} """ T_i1 = T_mix_ph(self.inl[0].get_flow(), T0=self.inl[0].T.val_SI) T_o2 = T_mix_ph(self.outl[1].get_flow(), T0=self.outl[1].T.val_SI) return self.ttd_u.val - T_i1 + T_o2 def ttd_u_func_doc(self, label): r""" Equation for upper terminal temperature difference. Parameters ---------- label : str Label for equation. Returns ------- latex : str LaTeX code of equations applied. """ latex = r'0 = ttd_\mathrm{u} - T_\mathrm{in,1} + T_\mathrm{out,2}' return generate_latex_eq(self, latex, label) def ttd_u_deriv(self, increment_filter, k): """ Calculate partial derivates of upper terminal temperature function. Parameters ---------- increment_filter : ndarray Matrix for filtering non-changing variables. k : int Position of derivatives in Jacobian matrix (k-th equation). """ f = self.ttd_u_func for i in [0, 3]: if not increment_filter[i, 1]: self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i) if not increment_filter[i, 2]: self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i) def ttd_l_func(self): r""" Equation for upper terminal temperature difference. Returns ------- residual : float Residual value of equation. .. 
math:: 0 = ttd_{l} - T_{out,1} + T_{in,2} """ T_i2 = T_mix_ph(self.inl[1].get_flow(), T0=self.inl[1].T.val_SI) T_o1 = T_mix_ph(self.outl[0].get_flow(), T0=self.outl[0].T.val_SI) return self.ttd_l.val - T_o1 + T_i2 def ttd_l_func_doc(self, label): r""" Equation for upper terminal temperature difference. Parameters ---------- label : str Label for equation. Returns ------- latex : str LaTeX code of equations applied. """ latex = r'0 = ttd_\mathrm{l} - T_\mathrm{out,1} + T_\mathrm{in,2}' return generate_latex_eq(self, latex, label) def ttd_l_deriv(self, increment_filter, k): """ Calculate partial derivates of upper terminal temperature function. Parameters ---------- increment_filter : ndarray Matrix for filtering non-changing variables. k : int Position of derivatives in Jacobian matrix (k-th equation). """ f = self.ttd_l_func for i in [1, 2]: if not increment_filter[i, 1]: self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i) if not increment_filter[i, 2]: self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i) def bus_func(self, bus): r""" Calculate the value of the bus function. Parameters ---------- bus : tespy.connections.bus.Bus TESPy bus object. Returns ------- val : float Value of energy transfer :math:`\dot{E}`. This value is passed to :py:meth:`tespy.components.component.Component.calc_bus_value` for value manipulation according to the specified characteristic line of the bus. .. math:: \dot{E} = \dot{m}_{in,1} \cdot \left( h_{out,1} - h_{in,1} \right) """ return self.inl[0].m.val_SI * ( self.outl[0].h.val_SI - self.inl[0].h.val_SI) def bus_func_doc(self, bus): r""" Return LaTeX string of the bus function. Parameters ---------- bus : tespy.connections.bus.Bus TESPy bus object. Returns ------- latex : str LaTeX string of bus function. """ return ( r'\dot{m}_\mathrm{in,1} \cdot \left(h_\mathrm{out,1} - ' r'h_\mathrm{in,1} \right)') def bus_deriv(self, bus): r""" Calculate partial derivatives of the bus function. 
Parameters ---------- bus : tespy.connections.bus.Bus TESPy bus object. Returns ------- deriv : ndarray Matrix of partial derivatives. """ deriv = np.zeros((1, 4, self.num_nw_vars)) f = self.calc_bus_value deriv[0, 0, 0] = self.numeric_deriv(f, 'm', 0, bus=bus) deriv[0, 0, 2] = self.numeric_deriv(f, 'h', 0, bus=bus) deriv[0, 2, 2] = self.numeric_deriv(f, 'h', 2, bus=bus) return deriv def initialise_source(self, c, key): r""" Return a starting value for pressure and enthalpy at outlet. Parameters ---------- c : tespy.connections.connection.Connection Connection to perform initialisation on. key : str Fluid property to retrieve. Returns ------- val : float Starting value for pressure/enthalpy in SI units. .. math:: val = \begin{cases} 4 \cdot 10^5 & \text{key = 'p'}\\ h\left(p, 200 \text{K} \right) & \text{key = 'h' at outlet 1}\\ h\left(p, 250 \text{K} \right) & \text{key = 'h' at outlet 2} \end{cases} """ if key == 'p': return 50e5 elif key == 'h': if c.source_id == 'out1': T = 200 + 273.15 return h_mix_pT(c.get_flow(), T) else: T = 250 + 273.15 return h_mix_pT(c.get_flow(), T) def initialise_target(self, c, key): r""" Return a starting value for pressure and enthalpy at inlet. Parameters ---------- c : tespy.connections.connection.Connection Connection to perform initialisation on. key : str Fluid property to retrieve. Returns ------- val : float Starting value for pressure/enthalpy in SI units. .. 
math:: val = \begin{cases} 4 \cdot 10^5 & \text{key = 'p'}\\ h\left(p, 300 \text{K} \right) & \text{key = 'h' at inlet 1}\\ h\left(p, 220 \text{K} \right) & \text{key = 'h' at outlet 2} \end{cases} """ if key == 'p': return 50e5 elif key == 'h': if c.target_id == 'in1': T = 300 + 273.15 return h_mix_pT(c.get_flow(), T) else: T = 220 + 273.15 return h_mix_pT(c.get_flow(), T) def calc_parameters(self): r"""Postprocessing parameter calculation.""" # component parameters self.Q.val = self.inl[0].m.val_SI * ( self.outl[0].h.val_SI - self.inl[0].h.val_SI) self.ttd_u.val = self.inl[0].T.val_SI - self.outl[1].T.val_SI self.ttd_l.val = self.outl[0].T.val_SI - self.inl[1].T.val_SI # pr and zeta for i in range(2): self.get_attr('pr' + str(i + 1)).val = ( self.outl[i].p.val_SI / self.inl[i].p.val_SI) self.get_attr('zeta' + str(i + 1)).val = ( (self.inl[i].p.val_SI - self.outl[i].p.val_SI) * np.pi ** 2 / ( 4 * self.inl[i].m.val_SI ** 2 * (self.inl[i].vol.val_SI + self.outl[i].vol.val_SI) )) # kA and logarithmic temperature difference if self.ttd_u.val < 0 or self.ttd_l.val < 0: self.td_log.val = np.nan self.kA.val = np.nan else: self.td_log.val = ((self.ttd_l.val - self.ttd_u.val) / np.log(self.ttd_l.val / self.ttd_u.val)) self.kA.val = -self.Q.val / self.td_log.val def entropy_balance(self): r""" Calculate entropy balance of a heat exchanger. The allocation of the entropy streams due to heat exchanged and due to irreversibility is performed by solving
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from tempest.common.rest_client import RestClient
from tempest.services.network import network_client_base


class NetworkClientJSON(network_client_base.NetworkClientBase):

    """
    Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
    V1 API has been removed from the code base.

    Implements create, delete, update, list and show for the basic Neutron
    abstractions (networks, sub-networks, routers, ports and floating IP):

    Implements add/remove interface to router using subnet ID / port ID

    It also implements list, show, update and reset for OpenStack Networking
    quotas
    """

    # def __init__(self, config, username, password, auth_url, token_url,
    #              tenant_name=None):
    #     super(NetworkClientJSON, self).__init__(config, username, password,
    #                                             auth_url, token_url,
    #                                             tenant_name)
    #     self.service = self.config.network.catalog_type
    #     self.version = '2.0'
    #     self.uri_prefix = "v%s" % (self.version)

    def get_rest_client(self, config, username, password,
                        auth_url, token_url, tenant_name=None):
        # plain factory; base class wires the returned client in
        return RestClient(config, username, password,
                          auth_url, token_url, tenant_name)

    def deserialize_single(self, body):
        return json.loads(body)

    def deserialize_list(self, body):
        res = json.loads(body)
        # expecting response in form {'resources': [res1, res2]} — unwrap
        # the single top-level key (py2: dict.keys() is a list)
        return res[res.keys()[0]]

    def serialize(self, data):
        return json.dumps(data)

    def create_network(self, name, **kwargs):
        post_body = {'network': kwargs}
        post_body['network']['name'] = name
        body = json.dumps(post_body)
        uri = '%s/networks' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def create_bulk_network(self, count, names):
        network_list = list()
        for i in range(count):
            network_list.append({'name': names[i]})
        post_body = {'networks': network_list}
        body = json.dumps(post_body)
        uri = '%s/networks' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def create_subnet(self, net_uuid, cidr, ip_version=4, **kwargs):
        post_body = {'subnet': kwargs}
        post_body['subnet']['ip_version'] = ip_version
        post_body['subnet']['network_id'] = net_uuid
        post_body['subnet']['cidr'] = cidr
        body = json.dumps(post_body)
        uri = '%s/subnets' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def create_port(self, network_id, **kwargs):
        post_body = {
            'port': {
                'network_id': network_id,
            }
        }
        for key, val in kwargs.items():
            post_body['port'][key] = val
        body = json.dumps(post_body)
        uri = '%s/ports' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def update_quotas(self, tenant_id, **kwargs):
        put_body = {'quota': kwargs}
        body = json.dumps(put_body)
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body['quota']

    def reset_quotas(self, tenant_id):
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.delete(uri)
        return resp, body

    def update_subnet(self, subnet_id, new_name):
        put_body = {
            'subnet': {
                'name': new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/subnets/%s' % (self.uri_prefix, subnet_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def update_port(self, port_id, new_name):
        put_body = {
            'port': {
                'name': new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/ports/%s' % (self.uri_prefix, port_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def update_network(self, network_id, new_name):
        put_body = {
            "network": {
                "name": new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/networks/%s' % (self.uri_prefix, network_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def create_router(self, name, admin_state_up=True, **kwargs):
        post_body = {'router': kwargs}
        post_body['router']['name'] = name
        post_body['router']['admin_state_up'] = admin_state_up
        body = json.dumps(post_body)
        uri = '%s/routers' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def _update_router(self, router_id, set_enable_snat, **kwargs):
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        resp, body = self.get(uri)
        body = json.loads(body)
        update_body = {}
        update_body['name'] = kwargs.get('name', body['router']['name'])
        update_body['admin_state_up'] = kwargs.get(
            'admin_state_up', body['router']['admin_state_up'])
        cur_gw_info = body['router']['external_gateway_info']
        # drop enable_snat unless the caller explicitly wants it set;
        # non-admins cannot pass it back
        if cur_gw_info and not set_enable_snat:
            cur_gw_info.pop('enable_snat', None)
        update_body['external_gateway_info'] = kwargs.get(
            'external_gateway_info', body['router']['external_gateway_info'])
        update_body = dict(router=update_body)
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        body = json.loads(body)
        return resp, body

    def update_router(self, router_id, **kwargs):
        """Update a router leaving enable_snat to its default value."""
        # If external_gateway_info contains enable_snat the request will fail
        # with 404 unless executed with admin client, and therefore we instruct
        # _update_router to not set this attribute
        # NOTE(salv-orlando): The above applies as long as Neutron's default
        # policy is to restrict enable_snat usage to admins only.
        return self._update_router(router_id, set_enable_snat=False, **kwargs)

    def update_router_with_snat_gw_info(self, router_id, **kwargs):
        """Update a router passing also the enable_snat attribute.

        This method must be execute with admin credentials, otherwise the API
        call will return a 404 error.
        """
        return self._update_router(router_id, set_enable_snat=True, **kwargs)

    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
                                                      router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        body = json.loads(body)
        return resp, body

    def add_router_interface_with_port_id(self, router_id, port_id):
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
                                                      router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        body = json.loads(body)
        return resp, body

    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
                                                         router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        body = json.loads(body)
        return resp, body

    def remove_router_interface_with_port_id(self, router_id, port_id):
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
                                                         router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        body = json.loads(body)
        return resp, body

    def create_floating_ip(self, ext_network_id, **kwargs):
        post_body = {
            'floatingip': kwargs}
        post_body['floatingip']['floating_network_id'] = ext_network_id
        body = json.dumps(post_body)
        uri = '%s/floatingips' % (self.uri_prefix)
        resp, body = self.post(uri, body=body)
        body = json.loads(body)
        return resp, body

    def create_security_group(self, name, **kwargs):
        post_body = {
            'security_group': {
                'name': name,
            }
        }
        for key, value in kwargs.iteritems():
            post_body['security_group'][str(key)] = value
        body = json.dumps(post_body)
        uri = '%s/security-groups' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def update_floating_ip(self, floating_ip_id, **kwargs):
        post_body = {
            'floatingip': kwargs}
        body = json.dumps(post_body)
        uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def create_security_group_rule(self, secgroup_id,
                                   direction='ingress', **kwargs):
        post_body = {
            'security_group_rule': {
                'direction': direction,
                'security_group_id': secgroup_id
            }
        }
        for key, value in kwargs.iteritems():
            post_body['security_group_rule'][str(key)] = value
        body = json.dumps(post_body)
        uri = '%s/security-group-rules' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def create_bulk_subnet(self, subnet_list):
        post_body = {'subnets': subnet_list}
        body = json.dumps(post_body)
        uri = '%s/subnets' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def create_bulk_port(self, port_list):
        post_body = {'ports': port_list}
        body = json.dumps(post_body)
        uri = '%s/ports' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id):
        post_body = {
            "vip": {
                "protocol": protocol,
                "name": name,
                "subnet_id": subnet_id,
                "pool_id": pool_id,
                "protocol_port": protocol_port
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/vips' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def update_vip(self, vip_id, new_name):
        put_body = {
            "vip": {
                "name": new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/vips/%s' % (self.uri_prefix, vip_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def create_member(self, address, protocol_port, pool_id):
        post_body = {
            "member": {
                "protocol_port": protocol_port,
                "pool_id": pool_id,
                "address": address
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/members' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def update_member(self, admin_state_up, member_id):
        put_body = {
            "member": {
                "admin_state_up": admin_state_up
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/members/%s' % (self.uri_prefix, member_id)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def create_health_monitor(self, delay, max_retries, Type, timeout):
        post_body = {
            "health_monitor": {
                "delay": delay,
                "max_retries": max_retries,
                "type": Type,
                "timeout": timeout
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/health_monitors' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def update_health_monitor(self, admin_state_up, uuid):
        put_body = {
            "health_monitor": {
                "admin_state_up": admin_state_up
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
        resp, body = self.put(uri, body)
        body = json.loads(body)
        return resp, body

    def associate_health_monitor_with_pool(self, health_monitor_id, pool_id):
        post_body = {
            "health_monitor": {
                "id": health_monitor_id,
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix, pool_id)
        resp, body = self.post(uri, body)
        body = json.loads(body)
        return resp, body

    def disassociate_health_monitor_with_pool(self, health_monitor_id,
                                              pool_id):
        # NOTE(review): the source chunk is truncated mid-statement here;
        # the argument tuple and the DELETE call below are reconstructed —
        # confirm against the original file.
        uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
                                                     health_monitor_id)
        resp, body = self.delete(uri)
        return resp, body
#!/bin/bash python """ Main entry point for the ANDES CLI and scripting interfaces. """ # [ANDES] (C)2015-2022 <NAME> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # File name: main.py # Last modified: 8/16/20, 7:26 PM import cProfile import glob import io import logging import os import platform import pprint import pstats import sys from functools import partial from subprocess import call from time import sleep from typing import Optional, Union from ._version import get_versions import andes from andes.routines import routine_cli from andes.shared import Pool, Process, coloredlogs, unittest, NCPUS_PHYSICAL from andes.system import System from andes.utils.misc import elapsed, is_interactive from andes.utils.paths import get_config_path, get_log_dir, tests_root logger = logging.getLogger(__name__) def config_logger(stream_level=logging.INFO, *, stream=True, file=True, log_file='andes.log', log_path=None, file_level=logging.DEBUG, ): """ Configure an ANDES logger with a `FileHandler` and a `StreamHandler`. This function is called at the beginning of ``andes.main.main()``. Updating ``stream_level`` and ``file_level`` is now supported. Parameters ---------- stream : bool, optional Create a `StreamHandler` for `stdout` if ``True``. If ``False``, the handler will not be created. file : bool, optionsl True if logging to ``log_file``. log_file : str, optional Logg file name for `FileHandler`, ``'andes.log'`` by default. If ``None``, the `FileHandler` will not be created. log_path : str, optional Path to store the log file. By default, the path is generated by get_log_dir() in utils.misc. stream_level : {10, 20, 30, 40, 50}, optional `StreamHandler` verbosity level. file_level : {10, 20, 30, 40, 50}, optional `FileHandler` verbosity level. 
Returns ------- None """ lg = logging.getLogger('andes') lg.setLevel(logging.DEBUG) if log_path is None: log_path = get_log_dir() sh_formatter_str = '%(message)s' if stream_level == 1: sh_formatter_str = '%(name)s:%(lineno)d - %(levelname)s - %(message)s' stream_level = 10 sh_formatter = logging.Formatter(sh_formatter_str) if len(lg.handlers) == 0: # create a StreamHandler if stream is True: sh = logging.StreamHandler() sh.setFormatter(sh_formatter) sh.setLevel(stream_level) lg.addHandler(sh) # file handler for level DEBUG and up if file is True and (log_file is not None): log_full_path = os.path.join(log_path, log_file) fh_formatter = logging.Formatter('%(process)d: %(asctime)s - %(name)s - %(levelname)s - %(message)s') fh = logging.FileHandler(log_full_path) fh.setLevel(file_level) fh.setFormatter(fh_formatter) lg.addHandler(fh) globals()['logger'] = lg else: # update the handlers set_logger_level(logger, logging.StreamHandler, stream_level) set_logger_level(logger, logging.FileHandler, file_level) if not is_interactive(): coloredlogs.install(logger=lg, level=stream_level, fmt=sh_formatter_str) def edit_conf(edit_config: Optional[Union[str, bool]] = ''): """ Edit the Andes config file which occurs first in the search path. Parameters ---------- edit_config : bool If ``True``, try to open up an editor and edit the config file. Otherwise returns. Returns ------- bool ``True`` is a config file is found and an editor is opened. ``False`` if ``edit_config`` is False. """ ret = False # no `edit-config` supplied if edit_config == '': return ret conf_path = get_config_path() if conf_path is None: logger.info('Config file does not exist. 
Automatically saving.') system = System() conf_path = system.save_config() logger.info('Editing config file "%s"', conf_path) editor = '' if edit_config is not None: # use `edit_config` as default editor editor = edit_config else: # use the following default editors if platform.system() == 'Linux': editor = os.environ.get('EDITOR', 'vim') elif platform.system() == 'Darwin': editor = os.environ.get('EDITOR', 'vim') elif platform.system() == 'Windows': editor = 'notepad.exe' editor_cmd = editor.split() editor_cmd.append(conf_path) call(editor_cmd) ret = True return ret def save_conf(config_path=None, overwrite=None, **kwargs): """ Save the Andes config to a file at the path specified by ``save_config``. The save action will not run if ``save_config = ''``. Parameters ---------- config_path : None or str, optional, ('' by default) Path to the file to save the config file. If the path is an emtpy string, the save action will not run. Save to `~/.andes/andes.conf` if ``None``. Returns ------- bool ``True`` is the save action is run. ``False`` otherwise. """ ret = False # no ``--save-config `` if config_path == '': return ret if config_path is not None and os.path.isdir(config_path): config_path = os.path.join(config_path, 'andes.rc') ps = System(**kwargs) ps.save_config(config_path, overwrite=overwrite) ret = True return ret def remove_output(recursive=False): """ Remove the outputs generated by Andes, including power flow reports ``_out.txt``, time-domain list ``_out.lst`` and data ``_out.dat``, eigenvalue analysis report ``_eig.txt``. Parameters ---------- recursive : bool Recursively clean all subfolders Returns ------- bool ``True`` is the function body executes with success. ``False`` otherwise. 
""" found = False cwd = os.getcwd() if recursive: dirs = [x[0] for x in os.walk(cwd)] else: dirs = (cwd,) for d in dirs: for file in os.listdir(d): if file.endswith('_eig.txt') or \ file.endswith('_out.txt') or \ file.endswith('_out.lst') or \ file.endswith('_out.npy') or \ file.endswith('_out.npz') or \ file.endswith('_prof.prof') or \ file.endswith('_prof.txt'): found = True try: os.remove(os.path.join(d, file)) logger.info('"%s" removed.', os.path.join(d, file)) except IOError: logger.error('Error removing file "%s".', os.path.join(d, file)) if not found: logger.info('No output file found in the working directory.') return True def print_license(): """ Print out Andes license to stdout. """ print(f""" ANDES version {andes.__version__} Copyright (c) 2015-2022 <NAME> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. A copy of the GNU General Public License is included below. For further information, see <http://www.gnu.org/licenses/>. """) return True def load(case, codegen=False, setup=True, use_input_path=True, **kwargs): """ Load a case and set up a system without running routine. Return a system. Takes other kwargs recognizable by ``System``, such as ``addfile``, ``input_path``, and ``no_putput``. Parameters ---------- case: str Path to the test case codegen : bool, optional Call full `System.prepare` on the returned system. Set to True if one need to inspect pretty-print equations and run simulations. 
setup : bool, optional Call `System.setup` after loading use_input_path : bool, optional True to use the ``input_path`` argument to behave the same as ``andes.main.run``. Warnings -------- If one need to add devices in addition to these from the case file, do ``setup=False`` and call ``System.add()`` to add devices. When done, manually invoke ``setup()`` to set up the system. """ if use_input_path: input_path = kwargs.get('input_path', '') case = _find_cases(case, input_path) if len(case) > 1: logger.error("`andes.load` does not support mulitple cases.") return None elif len(case) == 0: logger.error("No valid case found.") return None case = case[0] system = System(case, **kwargs) if codegen: system.prepare() if not andes.io.parse(system): return None if setup: system.setup() return system def run_case(case, *, routine='pflow', profile=False, convert='', convert_all='', add_book=None, codegen=False, remove_pycapsule=False, **kwargs): """ Run single simulation case for the given full path. Use ``run`` instead of ``run_case`` whenever possible. Argument ``input_path`` will not be prepended to ``case``. Arguments recognizable by ``load`` can be passed to ``run_case``. Parameters ---------- case : str Full path to the test case routine : str, ('pflow', 'tds', 'eig') Computation routine to run profile : bool, optional True to enable profiler convert : str, optional Format name for case file conversion. convert_all : str, optional Format name for case file conversion, output sheets for all available devices. add_book : str, optional Name of the device to be added to an excel case as a new sheet. codegen : bool, optional True to run codegen remove_pycapsule : bool, optional True to remove pycapsule from C libraries. Useful when dill serialization is needed. 
""" pr = cProfile.Profile() # enable profiler if requested if profile is True: pr.enable() system = load(case, codegen=codegen, use_input_path=False, **kwargs) if system is None: return None skip_empty = True overwrite = None # convert to xlsx and process `add-book` option if add_book is not None: convert = 'xlsx' overwrite = True if convert_all != '': convert = 'xlsx' skip_empty = False # convert to the requested format if convert != '': andes.io.dump(system, convert, overwrite=overwrite, skip_empty=skip_empty, add_book=add_book) return system # run the requested routine if routine is not None: if isinstance(routine, str): routine = [routine] if 'pflow' in routine: routine = list(routine) routine.remove('pflow') if system.is_setup: system.PFlow.run(**kwargs) for
on simulated session. GitHub issue #1273") def test_fetch_history_ram_cycle_information_samples_to_read_all(multi_instrument_session): configure_for_history_ram_test(multi_instrument_session) history_ram_cycle_info = multi_instrument_session.sites[1].fetch_history_ram_cycle_information( position=0, samples_to_read=-1) assert len(history_ram_cycle_info) == 7 assert all([i.pattern_name == 'new_pattern' for i in history_ram_cycle_info]) time_set_names = [i.time_set_name for i in history_ram_cycle_info] assert time_set_names == ['t0', 'tScan', 'tScan', 't2X', 't2X', 't2X', 't0'] vector_numbers = [i.vector_number for i in history_ram_cycle_info] assert vector_numbers == [5, 6, 6, 7, 7, 8, 9] cycle_numbers = [i.cycle_number for i in history_ram_cycle_info] assert cycle_numbers == list(range(5, 12)) scan_cycle_numbers = [i.scan_cycle_number for i in history_ram_cycle_info] assert scan_cycle_numbers == [-1, 0, 1, -1, -1, -1, -1] pin_names = multi_instrument_session.get_pattern_pin_names('new_pattern') assert pin_names == ['LO' + str(i) for i in range(4)] + ['HI' + str(i) for i in range(4)] expected_pin_states = [i.expected_pin_states for i in history_ram_cycle_info] assert expected_pin_states == [ [[nidigital.PinState.ZERO, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.ZERO, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.ZERO, nidigital.PinState.ONE, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.H]], [[nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.ONE, nidigital.PinState.ZERO, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.L]], [[nidigital.PinState.ONE, nidigital.PinState.ONE, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X], [nidigital.PinState.ZERO, nidigital.PinState.ZERO, 
nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.ONE, nidigital.PinState.ONE, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X], [nidigital.PinState.ZERO, nidigital.PinState.ZERO, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.ZERO, nidigital.PinState.ONE, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X], [nidigital.PinState.ONE, nidigital.PinState.ZERO, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X]] ] # If test expects actual pin state to be 'X', then value returned by the returned can be anything. # So, need to skip those pin states while comparing. 
actual_pin_states = [i.actual_pin_states for i in history_ram_cycle_info] actual_pin_states_expected_by_test = [ [[nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.H]], [[nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.L]], [[nidigital.PinState.H, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X], [nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.H, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X], [nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.L, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.L, nidigital.PinState.H, nidigital.PinState.X, nidigital.PinState.X], [nidigital.PinState.H, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.H, nidigital.PinState.L, nidigital.PinState.X, nidigital.PinState.X]], [[nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X, nidigital.PinState.X]] ] assert len(actual_pin_states) == len(actual_pin_states_expected_by_test) for 
vector_pin_states, vector_pin_states_expected_by_test in zip(actual_pin_states, actual_pin_states_expected_by_test): for cycle_pin_states, cycle_pin_states_expected_by_test in zip(vector_pin_states, vector_pin_states_expected_by_test): for pin_state, pin_state_expected_by_test in zip(cycle_pin_states, cycle_pin_states_expected_by_test): if pin_state_expected_by_test is not nidigital.PinState.X: assert pin_state == pin_state_expected_by_test # Only the first cycle returned is expected to have failures per_pin_pass_fail = [i.per_pin_pass_fail for i in history_ram_cycle_info] assert per_pin_pass_fail == [ [[True, False, True, True, False, True, True, True]], [[True, True, True, True, True, True, True, True]], [[True, True, True, True, True, True, True, True]], [[True, True, True, True, True, True, True, True], [True, True, True, True, True, True, True, True]], [[True, True, True, True, True, True, True, True], [True, True, True, True, True, True, True, True]], [[True, True, True, True, True, True, True, True], [True, True, True, True, True, True, True, True]], [[True, True, True, True, True, True, True, True]], ] def test_fetch_history_ram_cycle_information_no_failures(multi_instrument_session): test_name = 'simple_pattern' configure_session(multi_instrument_session, test_name) multi_instrument_session.load_pattern(get_test_file_path(test_name, 'pattern.digipat')) multi_instrument_session.burst_pattern(start_label='new_pattern') history_ram_cycle_info = multi_instrument_session.sites[0].fetch_history_ram_cycle_information( position=0, samples_to_read=-1) assert len(history_ram_cycle_info) == 0 history_ram_cycle_info = multi_instrument_session.sites[0].fetch_history_ram_cycle_information( position=0, samples_to_read=0) assert len(history_ram_cycle_info) == 0 def test_get_pattern_pin_names(multi_instrument_session): # Also tests load_pattern test_name = 'simple_pattern' configure_session(multi_instrument_session, test_name) 
multi_instrument_session.load_pattern(get_test_file_path(test_name, 'pattern.digipat')) pattern_pin_names = multi_instrument_session.get_pattern_pin_names(start_label='new_pattern') assert pattern_pin_names == ['LO' + str(i) for i in range(4)] + ['HI' + str(i) for i in range(4)] def test_get_site_pass_fail(multi_instrument_session): test_files_folder = 'simple_pattern' configure_session(multi_instrument_session, test_files_folder) multi_instrument_session.load_pattern(get_test_file_path(test_files_folder, 'pattern.digipat')) multi_instrument_session.burst_pattern(start_label='new_pattern') pass_fail = multi_instrument_session.get_site_pass_fail() assert pass_fail == {0: True, 1: True, 2: True, 3: True} pass_fail = multi_instrument_session.sites[3, 0].get_site_pass_fail() assert pass_fail == {3: True, 0: True} def test_get_fail_count(multi_instrument_session): test_files_folder = 'simple_pattern' configure_session(multi_instrument_session, test_files_folder) multi_instrument_session.load_pattern(get_test_file_path(test_files_folder, 'pattern.digipat')) multi_instrument_session.burst_pattern(start_label='new_pattern') fail_count = multi_instrument_session.get_fail_count() assert fail_count == [0] * multi_instrument_session.channel_count fail_count = multi_instrument_session.pins['site0/LO0', 'site0/HI1', 'site2/HI3'].get_fail_count() assert fail_count == [0] * 3 def test_ppmu_measure(multi_instrument_session): test_name = 'simple_pattern' configure_session(multi_instrument_session, test_name) voltage_measurements = multi_instrument_session.pins['site0/LO0', 'site1/HI0'].ppmu_measure( nidigital.PPMUMeasurementType.VOLTAGE) assert len(voltage_measurements) == 2 def test_ppmu_source(multi_instrument_session): test_name = 'simple_pattern' configure_session(multi_instrument_session, test_name) multi_instrument_session.pins['site0/LO0', 'site1/HI0'].ppmu_source() def test_read_static(multi_instrument_session): test_name = 'simple_pattern' 
configure_session(multi_instrument_session, test_name) pin_states = multi_instrument_session.pins['site0/LO0', 'site1/HI0'].read_static() assert pin_states == [nidigital.PinState.L] * 2 def test_write_static(multi_instrument_session): test_name = 'simple_pattern' configure_session(multi_instrument_session, test_name) multi_instrument_session.pins['site0/LO0', 'site1/HI0'].write_static( nidigital.WriteStaticPinState.ONE) def test_read_sequencer_flag(multi_instrument_session): flag_state = multi_instrument_session.read_sequencer_flag(nidigital.SequencerFlag.FLAG1) assert flag_state is False def test_write_sequencer_flag(multi_instrument_session): multi_instrument_session.write_sequencer_flag(nidigital.SequencerFlag.FLAG2, True) def test_read_sequencer_register(multi_instrument_session): register_value = multi_instrument_session.read_sequencer_register( nidigital.SequencerRegister.REGISTER10) assert register_value == 0 def test_write_sequencer_register(multi_instrument_session): multi_instrument_session.write_sequencer_register( nidigital.SequencerRegister.REGISTER15, 65535) def test_configure_voltage_levels(multi_instrument_session): assert multi_instrument_session.vil == pytest.approx(0.0, abs=1e-4) assert multi_instrument_session.vih == pytest.approx(3.3, rel=1e-3) assert multi_instrument_session.vol == pytest.approx(1.6, rel=1e-3) assert multi_instrument_session.voh == pytest.approx(1.7, rel=1e-3) assert multi_instrument_session.vterm == pytest.approx(2.0, rel=1e-3) multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_voltage_levels( vil=1.0, vih=2.0, vol=3.0, voh=4.0, vterm=5.0) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].vil == pytest.approx(1.0, rel=1e-3) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].vih == pytest.approx(2.0, rel=1e-3) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].vol == 
pytest.approx(3.0, rel=1e-3) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].voh == pytest.approx(4.0, rel=1e-3) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].vterm == pytest.approx(5.0, rel=1e-3) def test_configure_active_load_levels(multi_instrument_session): assert multi_instrument_session.active_load_iol == pytest.approx(0.0015, rel=1e-3) assert multi_instrument_session.active_load_ioh == pytest.approx(-0.0015, rel=1e-3) assert multi_instrument_session.active_load_vcom == pytest.approx(2.0, rel=1e-3) multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_active_load_levels( iol=0.024, ioh=-0.024, vcom=3.0) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].active_load_iol == pytest.approx(0.024, rel=1e-3) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].active_load_ioh == pytest.approx(-0.024, rel=1e-3) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].active_load_vcom == pytest.approx(3.0, rel=1e-3) def test_clock_generator_abort(multi_instrument_session): multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].clock_generator_abort() def test_clock_generator_generate_clock(multi_instrument_session): multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].clock_generator_generate_clock( 1e6, True) def test_frequency_counter_measure_frequency(multi_instrument_session): multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].selected_function = nidigital.SelectedFunction.DIGITAL multi_instrument_session.pins['site0/PinA', 'site1/PinC'].frequency_counter_measurement_time = hightime.timedelta(milliseconds=5) 
frequencies = multi_instrument_session.pins['site0/PinA', 'site1/PinC'].frequency_counter_measure_frequency() assert frequencies == [0] * 2 def test_create_get_delete_time_sets(multi_instrument_session): '''Test basic time set methods. - create_time_set - delete_all_time_sets ''' time_set_a = 'time_set_abc' time_set_b = 'time_set_123' multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.create_time_set(time_set_a) multi_instrument_session.create_time_set(time_set_b) multi_instrument_session.delete_all_time_sets() def test_configure_get_time_set_period(multi_instrument_session): '''Test time set period methods. - configure_time_set_period - get_time_set_period ''' time_set_name = 'time_set_abc' time_set_period = hightime.timedelta(microseconds=10) multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.create_time_set(time_set_name) assert multi_instrument_session.get_time_set_period(time_set_name) == hightime.timedelta(microseconds=1) multi_instrument_session.configure_time_set_period(time_set_name, time_set_period) assert multi_instrument_session.get_time_set_period(time_set_name) == time_set_period def test_configure_get_time_set_drive_format(multi_instrument_session): '''Test time set drive format methods. 
- configure_time_set_drive_format - get_time_set_drive_format ''' time_set_name = 'time_set_abc' time_set_drive_format = nidigital.DriveFormat.SBC multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.create_time_set(time_set_name) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_drive_format(time_set_name) == nidigital.DriveFormat.NR multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_drive_format(time_set_name, time_set_drive_format) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_drive_format(time_set_name) == time_set_drive_format def test_configure_get_time_set_edge(multi_instrument_session): '''Test time set individual edge methods. - configure_time_set_edge - get_time_set_edge ''' time_set_name = 'time_set_abc' time_set_period = hightime.timedelta(microseconds=10) time_set_drive_on = time_set_period * 0.5 multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.create_time_set(time_set_name) multi_instrument_session.configure_time_set_period(time_set_name, time_set_period) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_ON) == hightime.timedelta(seconds=0) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_ON, time_set_drive_on) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_ON) == time_set_drive_on def test_configure_time_set_drive_edges(multi_instrument_session): time_set_name = 'time_set_abc' time_set_period = hightime.timedelta(microseconds=10) time_set_drive_format = nidigital.DriveFormat.RL time_set_drive_on = time_set_period * 0.1 time_set_drive_data = time_set_period * 0.2 time_set_drive_return = 
time_set_period * 0.8 time_set_drive_off = time_set_period * 0.9 multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.create_time_set(time_set_name) multi_instrument_session.configure_time_set_period(time_set_name, time_set_period) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_drive_edges( time_set_name, time_set_drive_format, time_set_drive_on, time_set_drive_data, time_set_drive_return, time_set_drive_off) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_drive_format(time_set_name) == time_set_drive_format assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_ON) == time_set_drive_on assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_DATA) == time_set_drive_data assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_RETURN) == time_set_drive_return assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.DRIVE_OFF) == time_set_drive_off def test_configure_time_set_compare_edges_strobe(multi_instrument_session): time_set_name = 'time_set_abc' time_set_period = hightime.timedelta(microseconds=10) time_set_strobe = time_set_period * 0.5 multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap")) multi_instrument_session.create_time_set(time_set_name) multi_instrument_session.configure_time_set_period(time_set_name, time_set_period) multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_compare_edges_strobe( time_set_name, time_set_strobe) assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge( time_set_name, nidigital.TimeSetEdgeType.COMPARE_STROBE) == time_set_strobe 
def test_configure_get_time_set_edge_multiplier(multi_instrument_session):
    '''Test time set edge multiplier methods.

    - configure_time_set_edge_multiplier
    - get_time_set_edge_multiplier
    '''
    time_set_name = 'time_set_abc'
    time_set_period = hightime.timedelta(microseconds=10)
    time_set_edge_multiplier = 2

    multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap"))
    multi_instrument_session.create_time_set(time_set_name)
    multi_instrument_session.configure_time_set_period(time_set_name, time_set_period)

    # A fresh time set defaults to an edge multiplier of 1.
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge_multiplier(time_set_name) == 1
    multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_edge_multiplier(time_set_name, time_set_edge_multiplier)
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge_multiplier(time_set_name) == time_set_edge_multiplier


def test_configure_time_set_drive_edges2x(multi_instrument_session):
    '''Configure 2x (double data rate) drive edges and verify each edge.'''
    time_set_name = 'time_set_abc'
    time_set_period = hightime.timedelta(microseconds=10)
    time_set_drive_format = nidigital.DriveFormat.RL
    # Edge placements expressed as fractions of the period; the *2 edges
    # place the second data/return transitions later in the cycle.
    time_set_drive_on = time_set_period * 0.1
    time_set_drive_data = time_set_period * 0.2
    time_set_drive_return = time_set_period * 0.5
    time_set_drive_data2 = time_set_period * 0.7
    time_set_drive_return2 = time_set_period * 0.9
    time_set_drive_off = time_set_period * 0.9

    multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap"))
    multi_instrument_session.create_time_set(time_set_name)
    multi_instrument_session.configure_time_set_period(time_set_name, time_set_period)
    # 2x edges require an edge multiplier of 2 on the pins first.
    multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_edge_multiplier(time_set_name, 2)

    multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_drive_edges2x(
        time_set_name,
        time_set_drive_format,
        time_set_drive_on,
        time_set_drive_data,
        time_set_drive_return,
        time_set_drive_off,
        time_set_drive_data2,
        time_set_drive_return2)

    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_drive_format(time_set_name) == time_set_drive_format
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.DRIVE_ON) == time_set_drive_on
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.DRIVE_DATA) == time_set_drive_data
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.DRIVE_RETURN) == time_set_drive_return
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.DRIVE_OFF) == time_set_drive_off
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.DRIVE_DATA2) == time_set_drive_data2
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.DRIVE_RETURN2) == time_set_drive_return2


def test_configure_time_set_compare_edges_strobe2x(multi_instrument_session):
    '''Configure 2x compare strobes and verify both strobe edges.'''
    time_set_name = 'time_set_abc'
    time_set_period = hightime.timedelta(microseconds=10)
    time_set_strobe = time_set_period * 0.4
    time_set_strobe2 = time_set_period * 0.8

    multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap"))
    multi_instrument_session.create_time_set(time_set_name)
    multi_instrument_session.configure_time_set_period(time_set_name, time_set_period)
    # 2x strobes require an edge multiplier of 2 on the pins first.
    multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_edge_multiplier(time_set_name, 2)

    multi_instrument_session.pins['site0/PinA', 'site1/PinC'].configure_time_set_compare_edges_strobe2x(
        time_set_name,
        time_set_strobe,
        time_set_strobe2)
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.COMPARE_STROBE) == time_set_strobe
    assert multi_instrument_session.pins['site0/PinA', 'site1/PinC'].get_time_set_edge(
        time_set_name,
        nidigital.TimeSetEdgeType.COMPARE_STROBE2) == time_set_strobe2


def test_enable_disable_sites_single(multi_instrument_session):
    '''Test methods for single site enable configuration.

    - enable_sites
    - disable_sites
    - is_site_enabled
    '''
    multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap"))
    assert multi_instrument_session.sites[1].is_site_enabled()

    # Single site configuration
    multi_instrument_session.sites[1].disable_sites()
    assert not multi_instrument_session.sites[1].is_site_enabled()

    multi_instrument_session.sites[1].enable_sites()
    assert multi_instrument_session.sites[1].is_site_enabled()


def test_enable_disable_sites_multiple(multi_instrument_session):
    '''Test methods for multiple site enable configuration.

    - enable_sites
    - disable_sites
    - is_site_enabled
    '''
    multi_instrument_session.load_pin_map(os.path.join(test_files_base_dir, "pin_map.pinmap"))
    assert multi_instrument_session.sites[0].is_site_enabled()
    assert multi_instrument_session.sites[1].is_site_enabled()

    # Multiple site configuration
    multi_instrument_session.sites[0, 1].disable_sites()
    assert not multi_instrument_session.sites[0].is_site_enabled()
    assert not multi_instrument_session.sites[1].is_site_enabled()

    multi_instrument_session.sites[0, 1].enable_sites()
    # NOTE(review): the source was truncated after a dangling `assert` here;
    # the final assertions are restored to mirror the disable-phase checks
    # above — confirm against the upstream test file.
    assert multi_instrument_session.sites[0].is_site_enabled()
    assert multi_instrument_session.sites[1].is_site_enabled()
<gh_stars>0 # Software License Agreement (BSD License) # # Copyright (c) 2009-2014, Eucalyptus Systems, Inc. # All rights reserved. # # Redistribution and use of this software in source and binary forms, with or # without modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. # # Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other # materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Author: <EMAIL> from boto.iam import IAMConnection from boto.exception import BotoServerError import json import os import re import urllib from prettytable import PrettyTable from nephoria.baseops.botobaseops import BotoBaseOps class IAMops(BotoBaseOps): EUCARC_URL_NAME = 'iam_url' SERVICE_PREFIX = 'iam' CONNECTION_CLASS = IAMConnection def setup_resource_trackers(self): ## add test resource trackers and cleanup methods... 
self.test_resources["iam_accounts"] = self.test_resources.get('iam_accounts', []) self.test_resources_clean_methods["iam_accounts"] = None def create_account(self, account_name, ignore_existing=True): """ Create an account with the given name :param account_name: str name of account to create """ params = {'AccountName': account_name} try: res = self.get_response_items('CreateAccount', params, item_marker='account') self.log.debug("Created account: " + account_name) except BotoServerError as BE: if not (BE.status == 409 and ignore_existing): raise res = self.get_account(account_name=account_name) self.log.debug("create_account(). Account already exists: " + account_name) self.test_resources["iam_accounts"].append(account_name) return res def delete_account(self, account_name, recursive=False): """ Delete an account with the given name :param account_name: str name of account to delete :param recursive: """ self.log.debug("Deleting account: " + account_name) params = { 'AccountName': account_name, 'Recursive': recursive } self.connection.get_response('DeleteAccount', params) def get_all_accounts(self, account_id=None, account_name=None, search=False): """ Request all accounts, return account dicts that match given criteria :param account_id: regex string - to use for account_name :param account_name: regex - to use for account ID :param search: boolean - specify whether to use match or search when filtering the returned list :return: list of account names """ if search: re_meth = re.search else: re_meth = re.match if account_id and not re.match("\d{12}", account_id): if not account_name: account_name = account_id account_id = None self.log.debug('Attempting to fetch all accounts matching- account_id:' + str(account_id) + ' account_name:' + str(account_name)) response = self.get_response_items('ListAccounts', {}, item_marker='accounts', list_marker='Accounts') retlist = [] for account in response: if account_name is not None: if not search: account_name = 
"^{0}$".format(account_name.strip()) if not re_meth(account_name, account['account_name']): continue if account_id is not None: if not search: account_id = "^{0}$".format(account_id .strip()) if not re_meth(account['account_id'], account_id): continue retlist.append(account) return retlist def get_account(self, account_id=None, account_name=None, search=False): """ Request a specific account, returns an account dict that matches the given criteria :param account_id: regex string - to use for account_name :param account_name: regex - to use for account ID :param search: boolean - specify whether to use match or search when filtering the returned list :return: account dict """ if not (account_id or account_name): aliases = self.get_account_aliases() if aliases: account_name = aliases[0] else: raise ValueError('get_account(). Account id, name, or alias not found') accounts = self.get_all_accounts(account_id=account_id, account_name=account_name, search=search) if accounts: if len(accounts) > 1: raise ValueError('get_account matched more than a single account with the ' 'provided criteria: account_id="{0}", account_name="{1}". ' 'Matched:{2}' .format(account_id, account_name, ", ".join(str(x) for x in accounts))) else: return accounts[0] return None def create_user(self, user_name, path="/", delegate_account=None, ignore_existing=True): """ Create a user :param user_name: str name of user :param path: str user path :param delegate_account: str can be used by Cloud sys_admin in Eucalyptus to choose an account to operate on """ if not user_name: # Assuming this could be part of a test, allow it but warn... self.log.warning('create_user(). 
Passed unsupported user_name:"{0}"' .format(user_name)) params = {'UserName': user_name, 'Path': path } if delegate_account: params['DelegateAccount'] = delegate_account try: res = self.get_response_items('CreateUser', params, item_marker='user') self.log.debug('Created user:"{0}"'.format(user_name)) except BotoServerError as BE: if not (BE.status == 409 and ignore_existing): raise res = self.get_user(user_name=user_name, delegate_account=delegate_account) self.log.debug("create_user(). User already exists: " + user_name) return res def get_user(self, user_name=None, delegate_account=None): params = {} if user_name: params['UserName'] = user_name if delegate_account: params['DelegateAccount'] = delegate_account return self.get_response_items('GetUser', params, item_marker='user') def delete_user(self, user_name, delegate_account=None): """ Delete a user :param user_name: str name of user :param delegate_account: str can be used by Cloud sys_admin in Eucalyptus to choose an account to operate on """ self.log.debug("Deleting user " + user_name) params = {'UserName': user_name} if delegate_account: params['DelegateAccount'] = delegate_account self.connection.get_response('DeleteUser', params) def get_users_from_account(self, path=None, user_name=None, user_id=None, delegate_account=None, search=False): """ Returns access that match given criteria. By default will return current account. 
:param path: regex - to match for path :param user_name: str name of user :param user_id: regex - to match for user_id :param delegate_account: str can be used by Cloud sys_admin in Eucalyptus to choose an account to operate on :param search: use regex search (any occurrence) rather than match (exact same strings must occur) :return: """ self.log.debug('Attempting to fetch all access matching- user_id:' + str(user_id) + ' user_name:' + str(user_name) + " acct_name:" + str(delegate_account)) retlist = [] params = {} if search: re_meth = re.search else: re_meth = re.match if delegate_account: params['DelegateAccount'] = delegate_account response = self.get_response_items('ListUsers', params, item_marker='users', list_marker='Users') for user in response: if path is not None and not re_meth(path, user['path']): continue if user_name is not None and not re_meth(user_name, user['user_name']): continue if user_id is not None and not re_meth(user_id, user['user_id']): continue retlist.append(user) return retlist def show_all_accounts(self, account_name=None, account_id=None, search=False, print_table=True): """ Debug Method to print an account list based on given filter criteria :param account_name: regex - to use for account_name :param account_id: regex - to use for account_id :param search: boolean - specify whether to use match or search when filtering the returned list """ pt = PrettyTable(['ACCOUNT_NAME', 'ACCOUNT_ID']) pt.hrules = 1 pt.align = 'l' list = self.get_all_accounts(account_name=account_name, account_id=account_id, search=search) for account in list: pt.add_row([account['account_name'], account['account_id']]) if print_table: self.log.info("\n" + str(pt) + "\n") else: return pt def show_all_groups(self, account_name=None, account_id=None, path=None, group_name=None, group_id=None, search=False, print_table=True): """ Print all groups in an account :param account_name: regex - to use for account_name :param account_id: regex - to use for :param path: regex 
- to match for path :param group_name: regex - to match for user_name :param group_id: regex - to match for user_id :param search: boolean - specify whether to use match or search when filtering the returned list """ pt = PrettyTable(['ACCOUNT:', 'GROUPNAME:', 'GROUP_ID:']) pt.hrules = 1 pt.align = 'l' list = self.get_all_groups(account_name=account_name, account_id=account_id, path=path, group_name=group_name, group_id=group_id, search=search) for group in list: pt.add_row([group['account_name'], group['group_name'], group['group_id']]) if print_table: self.log.info("\n" + str(pt) + "\n") else: return pt def show_all_users(self, account_name=None, account_id=None, path=None, user_name=None, user_id=None, search=False, print_table=True ): """ Debug Method to print a user list based on given filter criteria :param account_name: regex - to use for account_name :param account_id: regex - to use for :param path: regex - to match for path :param user_name: regex - to match for user_name :param user_id: regex - to match for user_id :param search: boolean - specify whether to use match or search when filtering the returned list """ pt = PrettyTable(['ACCOUNT:', 'USERNAME:', 'USER_ID', 'ACCT_ID']) pt.hrules = 1 pt.align = 'l' list = self.get_all_users(account_name=account_name, account_id=account_id, path=path, user_name=user_name, user_id=user_id, search=search) for user in list: pt.add_row([user['account_name'], user['user_name'], user['user_id'], user['account_id']]) if print_table: self.log.info("\n" + str(pt) + "\n") else: return pt def get_account_aliases(self, delegate_account=None): params = {} if delegate_account: params['DelegateAccount'] = delegate_account resp = self.get_response_items('ListAccountAliases', params, item_marker='account_aliases', list_marker='AccountAliases') or [] return resp def get_username_for_active_connection(self): """ Helper