                if self.smr_invoked_bit else False)

    def is_ddt(self):
        return(True if self.ddt_bit else False)

    def is_to_etr(self):
        return(True if self.to_etr else False)

    def is_to_ms(self):
        return(True if self.to_ms else False)
class lisp_map_register():
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None
    def print_map_register(self):
        xtr_id = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id,
            " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) else
            (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else ""),
            self.auth_len, xtr_id, self.site_id))
    def encode(self):
        # Build the first 32-bit word: message type in the top 4 bits,
        # flag bits below it, record count in the low bits.
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        # Authentication length depends on the configured hash algorithm.
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return(packet)
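# --- Hedged sketch: not part of the original lispers.net source -------------
# A minimal, self-contained illustration of how encode() above lays out the
# first 32-bit word of a Map-Register: message type in the top 4 bits, flag
# bits below it, record count in the low byte.  LISP_MAP_REGISTER = 3 is
# assumed here (the Map-Register type code from RFC 6830); the 0x08000000 and
# 0x100 masks mirror the proxy-reply and want-map-notify bits used above.
def _sketch_map_register_first_word():
    import socket
    import struct

    LISP_MAP_REGISTER = 3              # assumed message-type code
    record_count = 1

    first_long = (LISP_MAP_REGISTER << 28) | record_count
    first_long |= 0x08000000           # "P": proxy Map-Reply requested
    first_long |= 0x100                # "N": Map-Notify (want-map-notify) requested

    header = struct.pack("I", socket.htonl(first_long))

    # Unpacking the word recovers the type code and the record count.
    word = socket.ntohl(struct.unpack("I", header)[0])
    assert (word >> 28) == LISP_MAP_REGISTER
    assert (word & 0xFF) == record_count
    return header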
643: b'NtWow64CsrAllocateMessagePointer',
644: b'NtWow64CsrCaptureMessageBuffer',
645: b'NtWow64CsrCaptureMessageString',
646: b'NtWow64CsrClientCallServer',
647: b'NtWow64CsrClientConnectToServer',
648: b'NtWow64CsrFreeCaptureBuffer',
649: b'NtWow64CsrGetProcessId',
650: b'NtWow64CsrIdentifyAlertableThread',
651: b'NtWow64CsrVerifyRegion',
652: b'NtWow64DebuggerCall',
653: b'NtWow64GetCurrentProcessorNumberEx',
654: b'NtWow64GetNativeSystemInformation',
655: b'NtWow64IsProcessorFeaturePresent',
656: b'NtWow64QueryInformationProcess64',
657: b'NtWow64ReadVirtualMemory64',
658: b'NtWow64WriteVirtualMemory64',
659: b'NtWriteFile',
660: b'NtWriteFileGather',
661: b'NtWriteRequestData',
662: b'NtWriteVirtualMemory',
663: b'NtYieldExecution',
664: b'NtdllDefWindowProc_A',
665: b'NtdllDefWindowProc_W',
666: b'NtdllDialogWndProc_A',
667: b'NtdllDialogWndProc_W',
668: b'PfxFindPrefix',
669: b'PfxInitialize',
670: b'PfxInsertPrefix',
671: b'PfxRemovePrefix',
672: b'PssNtCaptureSnapshot',
673: b'PssNtDuplicateSnapshot',
674: b'PssNtFreeRemoteSnapshot',
675: b'PssNtFreeSnapshot',
676: b'PssNtFreeWalkMarker',
677: b'PssNtQuerySnapshot',
678: b'PssNtValidateDescriptor',
679: b'PssNtWalkSnapshot',
680: b'RtlAbortRXact',
681: b'RtlAbsoluteToSelfRelativeSD',
682: b'RtlAcquirePebLock',
683: b'RtlAcquirePrivilege',
684: b'RtlAcquireReleaseSRWLockExclusive',
685: b'RtlAcquireResourceExclusive',
686: b'RtlAcquireResourceShared',
687: b'RtlAcquireSRWLockExclusive',
688: b'RtlAcquireSRWLockShared',
689: b'RtlActivateActivationContext',
690: b'RtlActivateActivationContextEx',
691: b'RtlAddAccessAllowedAce',
692: b'RtlAddAccessAllowedAceEx',
693: b'RtlAddAccessAllowedObjectAce',
694: b'RtlAddAccessDeniedAce',
695: b'RtlAddAccessDeniedAceEx',
696: b'RtlAddAccessDeniedObjectAce',
697: b'RtlAddAce',
698: b'RtlAddActionToRXact',
699: b'RtlAddAtomToAtomTable',
700: b'RtlAddAttributeActionToRXact',
701: b'RtlAddAuditAccessAce',
702: b'RtlAddAuditAccessAceEx',
703: b'RtlAddAuditAccessObjectAce',
704: b'RtlAddCompoundAce',
705: b'RtlAddIntegrityLabelToBoundaryDescriptor',
706: b'RtlAddMandatoryAce',
707: b'RtlAddProcessTrustLabelAce',
708: b'RtlAddRefActivationContext',
709: b'RtlAddRefMemoryStream',
710: b'RtlAddResourceAttributeAce',
711: b'RtlAddSIDToBoundaryDescriptor',
712: b'RtlAddScopedPolicyIDAce',
713: b'RtlAddVectoredContinueHandler',
714: b'RtlAddVectoredExceptionHandler',
715: b'RtlAddressInSectionTable',
716: b'RtlAdjustPrivilege',
717: b'RtlAllocateActivationContextStack',
718: b'RtlAllocateAndInitializeSid',
719: b'RtlAllocateAndInitializeSidEx',
720: b'RtlAllocateHandle',
721: b'RtlAllocateHeap',
722: b'RtlAllocateMemoryBlockLookaside',
723: b'RtlAllocateMemoryZone',
724: b'RtlAllocateWnfSerializationGroup',
725: b'RtlAnsiCharToUnicodeChar',
726: b'RtlAnsiStringToUnicodeSize',
727: b'RtlAnsiStringToUnicodeString',
728: b'RtlAppendAsciizToString',
729: b'RtlAppendPathElement',
730: b'RtlAppendStringToString',
731: b'RtlAppendUnicodeStringToString',
732: b'RtlAppendUnicodeToString',
733: b'RtlApplicationVerifierStop',
734: b'RtlApplyRXact',
735: b'RtlApplyRXactNoFlush',
736: b'RtlAppxIsFileOwnedByTrustedInstaller',
737: b'RtlAreAllAccessesGranted',
738: b'RtlAreAnyAccessesGranted',
739: b'RtlAreBitsClear',
740: b'RtlAreBitsSet',
741: b'RtlAreLongPathsEnabled',
742: b'RtlAssert',
743: b'RtlAvlInsertNodeEx',
744: b'RtlAvlRemoveNode',
745: b'RtlBarrier',
746: b'RtlBarrierForDelete',
747: b'RtlCancelTimer',
748: b'RtlCanonicalizeDomainName',
749: b'RtlCapabilityCheck',
750: b'RtlCaptureContext',
751: b'RtlCaptureStackBackTrace',
752: b'RtlCaptureStackContext',
753: b'RtlCharToInteger',
754: b'RtlCheckBootStatusIntegrity',
755: b'RtlCheckForOrphanedCriticalSections',
756: b'RtlCheckPortableOperatingSystem',
757: b'RtlCheckRegistryKey',
758: b'RtlCheckSandboxedToken',
759: b'RtlCheckTokenCapability',
760: b'RtlCheckTokenMembership',
761: b'RtlCheckTokenMembershipEx',
762: b'RtlCleanUpTEBLangLists',
763: b'RtlClearAllBits',
764: b'RtlClearBit',
765: b'RtlClearBits',
766: b'RtlClearThreadWorkOnBehalfTicket',
767: b'RtlCloneMemoryStream',
768: b'RtlCloneUserProcess',
769: b'RtlCmDecodeMemIoResource',
770: b'RtlCmEncodeMemIoResource',
771: b'RtlCommitDebugInfo',
772: b'RtlCommitMemoryStream',
773: b'RtlCompactHeap',
774: b'RtlCompareAltitudes',
775: b'RtlCompareMemory',
776: b'RtlCompareMemoryUlong',
777: b'RtlCompareString',
778: b'RtlCompareUnicodeString',
779: b'RtlCompareUnicodeStrings',
780: b'RtlCompressBuffer',
781: b'RtlComputeCrc32',
782: b'RtlComputeImportTableHash',
783: b'RtlComputePrivatizedDllName_U',
784: b'RtlConnectToSm',
785: b'RtlConsoleMultiByteToUnicodeN',
786: b'RtlContractHashTable',
787: b'RtlConvertDeviceFamilyInfoToString',
788: b'RtlConvertExclusiveToShared',
789: b'RtlConvertLCIDToString',
790: b'RtlConvertLongToLargeInteger',
791: b'RtlConvertSRWLockExclusiveToShared',
792: b'RtlConvertSharedToExclusive',
793: b'RtlConvertSidToUnicodeString',
794: b'RtlConvertToAutoInheritSecurityObject',
795: b'RtlConvertUlongToLargeInteger',
796: b'RtlCopyBitMap',
797: b'RtlCopyContext',
798: b'RtlCopyExtendedContext',
799: b'RtlCopyLuid',
800: b'RtlCopyLuidAndAttributesArray',
801: b'RtlCopyMappedMemory',
802: b'RtlCopyMemoryStreamTo',
803: b'RtlCopyOutOfProcessMemoryStreamTo',
804: b'RtlCopySecurityDescriptor',
805: b'RtlCopySid',
806: b'RtlCopySidAndAttributesArray',
807: b'RtlCopyString',
808: b'RtlCopyUnicodeString',
809: b'RtlCrc32',
810: b'RtlCrc64',
811: b'RtlCreateAcl',
812: b'RtlCreateActivationContext',
813: b'RtlCreateAndSetSD',
814: b'RtlCreateAtomTable',
815: b'RtlCreateBootStatusDataFile',
816: b'RtlCreateBoundaryDescriptor',
817: b'RtlCreateEnvironment',
818: b'RtlCreateEnvironmentEx',
819: b'RtlCreateHashTable',
820: b'RtlCreateHashTableEx',
821: b'RtlCreateHeap',
822: b'RtlCreateMemoryBlockLookaside',
823: b'RtlCreateMemoryZone',
824: b'RtlCreateProcessParameters',
825: b'RtlCreateProcessParametersEx',
826: b'RtlCreateProcessReflection',
827: b'RtlCreateQueryDebugBuffer',
828: b'RtlCreateRegistryKey',
829: b'RtlCreateSecurityDescriptor',
830: b'RtlCreateServiceSid',
831: b'RtlCreateSystemVolumeInformationFolder',
832: b'RtlCreateTagHeap',
833: b'RtlCreateTimer',
834: b'RtlCreateTimerQueue',
835: b'RtlCreateUnicodeString',
836: b'RtlCreateUnicodeStringFromAsciiz',
837: b'RtlCreateUserProcess',
838: b'RtlCreateUserSecurityObject',
839: b'RtlCreateUserStack',
840: b'RtlCreateUserThread',
841: b'RtlCreateVirtualAccountSid',
842: b'RtlCultureNameToLCID',
843: b'RtlCustomCPToUnicodeN',
844: b'RtlCutoverTimeToSystemTime',
845: b'RtlDeCommitDebugInfo',
846: b'RtlDeNormalizeProcessParams',
847: b'RtlDeactivateActivationContext',
848: b'RtlDebugPrintTimes',
849: b'RtlDecodePointer',
850: b'RtlDecodeRemotePointer',
851: b'RtlDecodeSystemPointer',
852: b'RtlDecompressBuffer',
853: b'RtlDecompressBufferEx',
854: b'RtlDecompressFragment',
855: b'RtlDefaultNpAcl',
856: b'RtlDelete',
857: b'RtlDeleteAce',
858: b'RtlDeleteAtomFromAtomTable',
859: b'RtlDeleteBarrier',
860: b'RtlDeleteBoundaryDescriptor',
861: b'RtlDeleteCriticalSection',
862: b'RtlDeleteElementGenericTable',
863: b'RtlDeleteElementGenericTableAvl',
864: b'RtlDeleteElementGenericTableAvlEx',
865: b'RtlDeleteHashTable',
866: b'RtlDeleteNoSplay',
867: b'RtlDeleteRegistryValue',
868: b'RtlDeleteResource',
869: b'RtlDeleteSecurityObject',
870: b'RtlDeleteTimer',
871: b'RtlDeleteTimerQueue',
872: b'RtlDeleteTimerQueueEx',
873: b'RtlDeregisterSecureMemoryCacheCallback',
874: b'RtlDeregisterWait',
875: b'RtlDeregisterWaitEx',
876: b'RtlDeriveCapabilitySidsFromName',
877: b'RtlDestroyAtomTable',
878: b'RtlDestroyEnvironment',
879: b'RtlDestroyHandleTable',
880: b'RtlDestroyHeap',
881: b'RtlDestroyMemoryBlockLookaside',
882: b'RtlDestroyMemoryZone',
883: b'RtlDestroyProcessParameters',
884: b'RtlDestroyQueryDebugBuffer',
885: b'RtlDetectHeapLeaks',
886: b'RtlDetermineDosPathNameType_U',
887: b'RtlDisableThreadProfiling',
888: b'RtlDllShutdownInProgress',
889: b'RtlDnsHostNameToComputerName',
890: b'RtlDoesFileExists_U',
891: b'RtlDosApplyFileIsolationRedirection_Ustr',
892: b'RtlDosPathNameToNtPathName_U',
893: b'RtlDosPathNameToNtPathName_U_WithStatus',
894: b'RtlDosPathNameToRelativeNtPathName_U',
895: b'RtlDosPathNameToRelativeNtPathName_U_WithStatus',
896: b'RtlDosSearchPath_U',
897: b'RtlDosSearchPath_Ustr',
898: b'RtlDowncaseUnicodeChar',
899: b'RtlDowncaseUnicodeString',
900: b'RtlDumpResource',
901: b'RtlDuplicateUnicodeString',
902: b'RtlEmptyAtomTable',
903: b'RtlEnableEarlyCriticalSectionEventCreation',
904: b'RtlEnableThreadProfiling',
905: b'RtlEncodePointer',
906: b'RtlEncodeRemotePointer',
907: b'RtlEncodeSystemPointer',
908: b'RtlEndEnumerationHashTable',
909: b'RtlEndStrongEnumerationHashTable',
910: b'RtlEndWeakEnumerationHashTable',
911: b'RtlEnlargedIntegerMultiply',
912: b'RtlEnlargedUnsignedMultiply',
913: b'RtlEnterCriticalSection',
914: b'RtlEnumProcessHeaps',
915: b'RtlEnumerateEntryHashTable',
916: b'RtlEnumerateGenericTable',
917: b'RtlEnumerateGenericTableAvl',
918: b'RtlEnumerateGenericTableLikeADirectory',
919: b'RtlEnumerateGenericTableWithoutSplaying',
920: b'RtlEnumerateGenericTableWithoutSplayingAvl',
921: b'RtlEqualComputerName',
922: b'RtlEqualDomainName',
923: b'RtlEqualLuid',
924: b'RtlEqualPrefixSid',
925: b'RtlEqualSid',
926: b'RtlEqualString',
927: b'RtlEqualUnicodeString',
928: b'RtlEqualWnfChangeStamps',
929: b'RtlEraseUnicodeString',
930: b'RtlEthernetAddressToStringA',
931: b'RtlEthernetAddressToStringW',
932: b'RtlEthernetStringToAddressA',
933: b'RtlEthernetStringToAddressW',
934: b'RtlExitUserProcess',
935: b'RtlExitUserThread',
936: b'RtlExpandEnvironmentStrings',
937: b'RtlExpandEnvironmentStrings_U',
938: b'RtlExpandHashTable',
939: b'RtlExtendMemoryBlockLookaside',
940: b'RtlExtendMemoryZone',
941: b'RtlExtendedIntegerMultiply',
942: b'RtlExtendedLargeIntegerDivide',
943: b'RtlExtendedMagicDivide',
944: b'RtlExtractBitMap',
945: b'RtlFillMemory',
946: b'RtlFillMemoryUlong',
947: b'RtlFillMemoryUlonglong',
948: b'RtlFinalReleaseOutOfProcessMemoryStream',
949: b'RtlFindAceByType',
950: b'RtlFindActivationContextSectionGuid',
951: b'RtlFindActivationContextSectionString',
952: b'RtlFindCharInUnicodeString',
953: b'RtlFindClearBits',
954: b'RtlFindClearBitsAndSet',
955: b'RtlFindClearRuns',
956: b'RtlFindClosestEncodableLength',
957: b'RtlFindExportedRoutineByName',
958: b'RtlFindLastBackwardRunClear',
959: b'RtlFindLeastSignificantBit',
960: b'RtlFindLongestRunClear',
961: b'RtlFindMessage',
962: b'RtlFindMostSignificantBit',
963: b'RtlFindNextForwardRunClear',
964: b'RtlFindSetBits',
965: b'RtlFindSetBitsAndClear',
966: b'RtlFindUnicodeSubstring',
967: b'RtlFirstEntrySList',
968: b'RtlFirstFreeAce',
969: b'RtlFlsAlloc',
970: b'RtlFlsFree',
971: b'RtlFlushHeaps',
972: b'RtlFlushSecureMemoryCache',
973: b'RtlFormatCurrentUserKeyPath',
974: b'RtlFormatMessage',
975: b'RtlFormatMessageEx',
976: b'RtlFreeActivationContextStack',
977: b'RtlFreeAnsiString',
978: b'RtlFreeHandle',
979: b'RtlFreeHeap',
980: b'RtlFreeMemoryBlockLookaside',
981: b'RtlFreeOemString',
982: b'RtlFreeSid',
983: b'RtlFreeThreadActivationContextStack',
984: b'RtlFreeUnicodeString',
985: b'RtlFreeUserStack',
986: b'RtlGUIDFromString',
987: b'RtlGenerate8dot3Name',
988: b'RtlGetAce',
989: b'RtlGetActiveActivationContext',
990: b'RtlGetActiveConsoleId',
991: b'RtlGetAppContainerNamedObjectPath',
992: b'RtlGetAppContainerParent',
993: b'RtlGetAppContainerSidType',
994: b'RtlGetCallersAddress',
995: b'RtlGetCompressionWorkSpaceSize',
996: b'RtlGetConsoleSessionForegroundProcessId',
997: b'RtlGetControlSecurityDescriptor',
998: b'RtlGetCriticalSectionRecursionCount',
999: b'RtlGetCurrentDirectory_U',
1000: b'RtlGetCurrentPeb',
1001: b'RtlGetCurrentProcessorNumber',
1002: b'RtlGetCurrentProcessorNumberEx',
1003: b'RtlGetCurrentServiceSessionId',
1004: b'RtlGetCurrentTransaction',
1005: b'RtlGetDaclSecurityDescriptor',
1006: b'RtlGetDeviceFamilyInfoEnum',
1007: b'RtlGetElementGenericTable',
1008: b'RtlGetElementGenericTableAvl',
1009: b'RtlGetEnabledExtendedFeatures',
1010: b'RtlGetExePath',
1011: b'RtlGetExtendedContextLength',
1012: b'RtlGetExtendedFeaturesMask',
1013: b'RtlGetFileMUIPath',
1014: b'RtlGetFrame',
1015: b'RtlGetFullPathName_U',
1016: b'RtlGetFullPathName_UEx',
1017: b'RtlGetFullPathName_UstrEx',
1018: b'RtlGetGroupSecurityDescriptor',
1019: b'RtlGetIntegerAtom',
1020: b'RtlGetInterruptTimePrecise',
1021: b'RtlGetLastNtStatus',
1022: b'RtlGetLastWin32Error',
1023: b'RtlGetLengthWithoutLastFullDosOrNtPathElement',
1024: b'RtlGetLengthWithoutTrailingPathSeperators',
1025: b'RtlGetLocaleFileMappingAddress',
1026: b'RtlGetLongestNtPathLength',
1027: b'RtlGetNativeSystemInformation',
1028: b'RtlGetNextEntryHashTable',
1029: b'RtlGetNtGlobalFlags',
1030: b'RtlGetNtProductType',
1031: b'RtlGetNtVersionNumbers',
1032: b'RtlGetOwnerSecurityDescriptor',
1033: b'RtlGetParentLocaleName',
1034: b'RtlGetProcessHeaps',
1035: b'RtlGetProcessPreferredUILanguages',
1036: b'RtlGetProductInfo',
1037: b'RtlGetSaclSecurityDescriptor',
1038: b'RtlGetSearchPath',
1039: b'RtlGetSecurityDescriptorRMControl',
1040: b'RtlGetSetBootStatusData',
1041: b'RtlGetSuiteMask',
1042: b'RtlGetSystemPreferredUILanguages',
1043: b'RtlGetSystemTimePrecise',
1044: b'RtlGetThreadErrorMode',
1045: b'RtlGetThreadLangIdByIndex',
1046: b'RtlGetThreadPreferredUILanguages',
1047: b'RtlGetThreadWorkOnBehalfTicket',
1048: b'RtlGetUILanguageInfo',
1049: b'RtlGetUnloadEventTrace',
1050: b'RtlGetUnloadEventTraceEx',
1051: b'RtlGetUserInfoHeap',
1052: b'RtlGetUserPreferredUILanguages',
1053: b'RtlGetVersion',
1054: b'RtlGuardCheckLongJumpTarget',
1055: b'RtlHashUnicodeString',
1056: b'RtlHeapTrkInitialize',
1057: b'RtlIdentifierAuthoritySid',
1058: b'RtlIdnToAscii',
1059: b'RtlIdnToNameprepUnicode',
1060: b'RtlIdnToUnicode',
1061: b'RtlImageDirectoryEntryToData',
1062: b'RtlImageNtHeader',
1063: b'RtlImageNtHeaderEx',
1064: b'RtlImageRvaToSection',
1065: b'RtlImageRvaToVa',
1066: b'RtlImpersonateSelf',
1067: b'RtlImpersonateSelfEx',
1068: b'RtlInitAnsiString',
1069: b'RtlInitAnsiStringEx',
1070: b'RtlInitBarrier',
1071: b'RtlInitCodePageTable',
1072: b'RtlInitEnumerationHashTable',
1073: b'RtlInitMemoryStream',
1074: b'RtlInitNlsTables',
1075: b'RtlInitOutOfProcessMemoryStream',
1076: b'RtlInitString',
1077: b'RtlInitStringEx',
1078: b'RtlInitStrongEnumerationHashTable',
1079: b'RtlInitUnicodeString',
1080: b'RtlInitUnicodeStringEx',
1081: b'RtlInitWeakEnumerationHashTable',
1082: b'RtlInitializeAtomPackage',
1083: b'RtlInitializeBitMap',
1084: b'RtlInitializeConditionVariable',
1085: b'RtlInitializeContext',
1086: b'RtlInitializeCriticalSection',
1087: b'RtlInitializeCriticalSectionAndSpinCount',
1088: b'RtlInitializeCriticalSectionEx',
1089: b'RtlInitializeExceptionChain',
1090: b'RtlInitializeExtendedContext',
1091: b'RtlInitializeGenericTable',
1092: b'RtlInitializeGenericTableAvl',
1093: b'RtlInitializeHandleTable',
1094: b'RtlInitializeNtUserPfn',
1095: b'RtlInitializeRXact',
1096: b'RtlInitializeResource',
1097: b'RtlInitializeSListHead',
1098: b'RtlInitializeSRWLock',
1099: b'RtlInitializeSid',
1100: b'RtlInitializeSidEx',
1101: b'RtlInsertElementGenericTable',
1102: b'RtlInsertElementGenericTableAvl',
1103: b'RtlInsertElementGenericTableFull',
1104: b'RtlInsertElementGenericTableFullAvl',
1105: b'RtlInsertEntryHashTable',
1106: b'RtlInt64ToUnicodeString',
1107: b'RtlIntegerToChar',
1108: b'RtlIntegerToUnicodeString',
1109: b'RtlInterlockedClearBitRun',
1110: b'RtlInterlockedCompareExchange64',
1111: b'RtlInterlockedFlushSList',
1112: b'RtlInterlockedPopEntrySList',
1113: b'RtlInterlockedPushEntrySList',
1114: b'RtlInterlockedPushListSListEx',
1115: b'RtlInterlockedSetBitRun',
1116: b'RtlIoDecodeMemIoResource',
1117: b'RtlIoEncodeMemIoResource',
1118: b'RtlIpv4AddressToStringA',
1119: b'RtlIpv4AddressToStringExA',
1120: b'RtlIpv4AddressToStringExW',
1121: b'RtlIpv4AddressToStringW',
1122: b'RtlIpv4StringToAddressA',
1123: b'RtlIpv4StringToAddressExA',
1124: b'RtlIpv4StringToAddressExW',
1125: b'RtlIpv4StringToAddressW',
1126: b'RtlIpv6AddressToStringA',
1127: b'RtlIpv6AddressToStringExA',
1128: b'RtlIpv6AddressToStringExW',
1129: b'RtlIpv6AddressToStringW',
1130: b'RtlIpv6StringToAddressA',
1131: b'RtlIpv6StringToAddressExA',
1132: b'RtlIpv6StringToAddressExW',
1133: b'RtlIpv6StringToAddressW',
1134: b'RtlIsActivationContextActive',
1135: b'RtlIsCapabilitySid',
1136: b'RtlIsCriticalSectionLocked',
1137: b'RtlIsCriticalSectionLockedByThread',
1138: b'RtlIsCurrentThreadAttachExempt',
1139: b'RtlIsDosDeviceName_U',
1140: b'RtlIsGenericTableEmpty',
1141: b'RtlIsGenericTableEmptyAvl',
1142: b'RtlIsLongPathAwareProcessByManifest',
1143: b'RtlIsMultiSessionSku',
1144: b'RtlIsMultiUsersInSessionSku',
1145: b'RtlIsNameInExpression',
1146: b'RtlIsNameLegalDOS8Dot3',
1147: b'RtlIsNormalizedString',
1148: b'RtlIsPackageSid',
1149: b'RtlIsParentOfChildAppContainer',
1150: b'RtlIsProcessorFeaturePresent',
1151: b'RtlIsTextUnicode',
1152: b'RtlIsThreadWithinLoaderCallout',
1153: b'RtlIsUntrustedObject',
1154: b'RtlIsValidHandle',
1155: b'RtlIsValidIndexHandle',
1156: b'RtlIsValidLocaleName',
1157: b'RtlIsValidProcessTrustLabelSid',
1158: b'RtlKnownExceptionFilter',
1159: b'RtlLCIDToCultureName',
1160: b'RtlLargeIntegerAdd',
1161: b'RtlLargeIntegerArithmeticShift',
1162: b'RtlLargeIntegerDivide',
1163: b'RtlLargeIntegerNegate',
1164: b'RtlLargeIntegerShiftLeft',
1165: b'RtlLargeIntegerShiftRight',
1166: b'RtlLargeIntegerSubtract',
1167: b'RtlLargeIntegerToChar',
1168: b'RtlLcidToLocaleName',
1169: b'RtlLeaveCriticalSection',
1170: b'RtlLengthRequiredSid',
1171: b'RtlLengthSecurityDescriptor',
1172: b'RtlLengthSid',
1173: b'RtlLengthSidAsUnicodeString',
1174: b'RtlLoadString',
1175: b'RtlLocalTimeToSystemTime',
1176: b'RtlLocaleNameToLcid',
1177: b'RtlLocateExtendedFeature2',
1178: b'RtlLocateExtendedFeature',
1179: b'RtlLocateLegacyContext',
1180: b'RtlLockBootStatusData',
1181: b'RtlLockCurrentThread',
1182: b'RtlLockHeap',
1183: b'RtlLockMemoryBlockLookaside',
1184: b'RtlLockMemoryStreamRegion',
1185: b'RtlLockMemoryZone',
1186: b'RtlLockModuleSection',
1187: b'RtlLogStackBackTrace',
1188: b'RtlLookupAtomInAtomTable',
1189: b'RtlLookupElementGenericTable',
1190: b'RtlLookupElementGenericTableAvl',
1191: b'RtlLookupElementGenericTableFull',
1192: b'RtlLookupElementGenericTableFullAvl',
1193: b'RtlLookupEntryHashTable',
1194: b'RtlMakeSelfRelativeSD',
1195: b'RtlMapGenericMask',
1196: b'RtlMapSecurityErrorToNtStatus',
1197: b'RtlMoveMemory',
1198: b'RtlMultiAppendUnicodeStringBuffer',
1199: b'RtlMultiByteToUnicodeN',
1200: b'RtlMultiByteToUnicodeSize',
1201: b'RtlMultipleAllocateHeap',
1202: b'RtlMultipleFreeHeap',
1203: b'RtlNewInstanceSecurityObject',
1204: b'RtlNewSecurityGrantedAccess',
1205: b'RtlNewSecurityObject',
1206: b'RtlNewSecurityObjectEx',
1207: b'RtlNewSecurityObjectWithMultipleInheritance',
1208: b'RtlNormalizeProcessParams',
1209: b'RtlNormalizeString',
1210: b'RtlNtPathNameToDosPathName',
1211: b'RtlNtStatusToDosError',
1212: b'RtlNtStatusToDosErrorNoTeb',
1213: b'RtlNumberGenericTableElements',
1214: b'RtlNumberGenericTableElementsAvl',
1215: b'RtlNumberOfClearBits',
1216: b'RtlNumberOfClearBitsInRange',
1217: b'RtlNumberOfSetBits',
1218: b'RtlNumberOfSetBitsInRange',
1219: b'RtlNumberOfSetBitsUlongPtr',
1220: b'RtlOemStringToUnicodeSize',
1221: b'RtlOemStringToUnicodeString',
1222: b'RtlOemToUnicodeN',
1223: b'RtlOpenCurrentUser',
1224: b'RtlOsDeploymentState',
1225: b'RtlOwnerAcesPresent',
1226: b'RtlPcToFileHeader',
1227: b'RtlPinAtomInAtomTable',
1228: b'RtlPopFrame',
1229: b'RtlPrefixString',
1230: b'RtlPrefixUnicodeString',
1231: b'RtlProcessFlsData',
1232: b'RtlProtectHeap',
1233: b'RtlPublishWnfStateData',
1234: b'RtlPushFrame',
1235: b'RtlQueryActivationContextApplicationSettings',
1236: b'RtlQueryAtomInAtomTable',
1237: b'RtlQueryCriticalSectionOwner',
1238: b'RtlQueryDepthSList',
1239: b'RtlQueryDynamicTimeZoneInformation',
1240: b'RtlQueryElevationFlags',
1241: b'RtlQueryEnvironmentVariable',
1242: b'RtlQueryEnvironmentVariable_U',
1243: b'RtlQueryHeapInformation',
1244: b'RtlQueryInformationAcl',
1245: b'RtlQueryInformationActivationContext',
1246: b'RtlQueryInformationActiveActivationContext',
1247: b'RtlQueryInterfaceMemoryStream',
1248: b'RtlQueryModuleInformation',
1249: b'RtlQueryPackageClaims',
1250: b'RtlQueryPackageIdentity',
1251: b'RtlQueryPackageIdentityEx',
1252: b'RtlQueryPerformanceCounter',
1253: b'RtlQueryPerformanceFrequency',
1254: b'RtlQueryProcessBackTraceInformation',
1255: b'RtlQueryProcessDebugInformation',
1256: b'RtlQueryProcessHeapInformation',
1257: b'RtlQueryProcessLockInformation',
1258: b'RtlQueryProtectedPolicy',
1259: b'RtlQueryRegistryValues',
1260: b'RtlQueryRegistryValuesEx',
1261: b'RtlQueryResourcePolicy',
1262: b'RtlQuerySecurityObject',
1263: b'RtlQueryTagHeap',
1264: b'RtlQueryThreadProfiling',
1265: b'RtlQueryTimeZoneInformation',
1266: b'RtlQueryUnbiasedInterruptTime',
1267: b'RtlQueryValidationRunlevel',
1268: b'RtlQueryWnfMetaNotification',
1269: b'RtlQueryWnfStateData',
1270: b'RtlQueryWnfStateDataWithExplicitScope',
1271: b'RtlQueueApcWow64Thread',
1272: b'RtlQueueWorkItem',
1273: b'RtlRaiseException',
1274: b'RtlRaiseStatus',
1275: b'RtlRandom',
1276: b'RtlRandomEx',
1277: b'RtlRbInsertNodeEx',
1278: b'RtlRbRemoveNode',
1279: b'RtlReAllocateHeap',
1280: b'RtlReadMemoryStream',
1281: b'RtlReadOutOfProcessMemoryStream',
1282: b'RtlReadThreadProfilingData',
+ pod._translation_hh0 + pod._translation_qqq #440
_matrix_2_00z_3q3qq_miss = pod.matrices_dict["2_00z"] + pod._translation_hh0 + pod._translation_qqq #441
_matrix_2_xx0_3q3qq_miss = pod.matrices_dict["2_xx0"] + pod._translation_hh0 + pod._translation_qqq #442
_matrix_2_x0x_3q3qq_miss = pod.matrices_dict["2_x0x"] + pod._translation_hh0 + pod._translation_qqq #443
_matrix_2_0yy_3q3qq_miss = pod.matrices_dict["2_0yy"] + pod._translation_hh0 + pod._translation_qqq #444
_matrix_2_xmx0_3q3qq_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hh0 + pod._translation_qqq #445
_matrix_2_mx0x_3q3qq_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hh0 + pod._translation_qqq #446
_matrix_2_0myy_3q3qq_miss = pod.matrices_dict["2_0myy"] + pod._translation_hh0 + pod._translation_qqq #447
_matrix_3_xxx_3q3qq_miss = pod.matrices_dict["3_xxx"] + pod._translation_hh0 + pod._translation_qqq #448
_matrix_3_xmxmx_3q3qq_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hh0 + pod._translation_qqq #449
_matrix_3_mxxmx_3q3qq_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hh0 + pod._translation_qqq #450
_matrix_3_mxmxx_3q3qq_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hh0 + pod._translation_qqq #451
_matrix_m3_xxx_3q3qq_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hh0 + pod._translation_qqq #452
_matrix_m3_xmxmx_3q3qq_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hh0 + pod._translation_qqq #453
_matrix_m3_mxxmx_3q3qq_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hh0 + pod._translation_qqq #454
_matrix_m3_mxmxx_3q3qq_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hh0 + pod._translation_qqq #455
_matrix_4_x00_3q3qq_miss = pod.matrices_dict["4_x00"] + pod._translation_hh0 + pod._translation_qqq #456
_matrix_4_0y0_3q3qq_miss = pod.matrices_dict["4_0y0"] + pod._translation_hh0 + pod._translation_qqq #457
_matrix_4_00z_3q3qq_miss = pod.matrices_dict["4_00z"] + pod._translation_hh0 + pod._translation_qqq #458
_matrix_m4_x00_3q3qq_miss = pod.matrices_dict["-4_x00"] + pod._translation_hh0 + pod._translation_qqq #459
_matrix_m4_0y0_3q3qq_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hh0 + pod._translation_qqq #460
_matrix_m4_00z_3q3qq_miss = pod.matrices_dict["-4_00z"] + pod._translation_hh0 + pod._translation_qqq #461
_matrix_inv_000_3q3q3q_miss = pod.matrices_dict["inv_000"] + pod._translation_hhh + pod._translation_qqq #462
_matrix_m_0yz_3q3q3q_miss = pod.matrices_dict["m_0yz"] + pod._translation_hhh + pod._translation_qqq #463
_matrix_m_x0z_3q3q3q_miss = pod.matrices_dict["m_x0z"] + pod._translation_hhh + pod._translation_qqq #464
_matrix_m_xy0_3q3q3q_miss = pod.matrices_dict["m_xy0"] + pod._translation_hhh + pod._translation_qqq #465
_matrix_m_xmxz_3q3q3q_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hhh + pod._translation_qqq #466
_matrix_m_xymy_3q3q3q_miss = pod.matrices_dict["m_xymy"] + pod._translation_hhh + pod._translation_qqq #467
_matrix_m_xymx_3q3q3q_miss = pod.matrices_dict["m_xymx"] + pod._translation_hhh + pod._translation_qqq #468
_matrix_m_xyx_3q3q3q_miss = pod.matrices_dict["m_xyx"] + pod._translation_hhh + pod._translation_qqq #469
_matrix_m_xxz_3q3q3q_miss = pod.matrices_dict["m_xxz"] + pod._translation_hhh + pod._translation_qqq #470
_matrix_m_xyy_3q3q3q_miss = pod.matrices_dict["m_xyy"] + pod._translation_hhh + pod._translation_qqq #471
_matrix_2_x00_3q3q3q_miss = pod.matrices_dict["2_x00"] + pod._translation_hhh + pod._translation_qqq #472
_matrix_2_0y0_3q3q3q_miss = pod.matrices_dict["2_0y0"] + pod._translation_hhh + pod._translation_qqq #473
_matrix_2_00z_3q3q3q_miss = pod.matrices_dict["2_00z"] + pod._translation_hhh + pod._translation_qqq #474
_matrix_2_xx0_3q3q3q_miss = pod.matrices_dict["2_xx0"] + pod._translation_hhh + pod._translation_qqq #475
_matrix_2_x0x_3q3q3q_miss = pod.matrices_dict["2_x0x"] + pod._translation_hhh + pod._translation_qqq #476
_matrix_2_0yy_3q3q3q_miss = pod.matrices_dict["2_0yy"] + pod._translation_hhh + pod._translation_qqq #477
_matrix_2_xmx0_3q3q3q_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hhh + pod._translation_qqq #478
_matrix_2_mx0x_3q3q3q_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hhh + pod._translation_qqq #479
_matrix_2_0myy_3q3q3q_miss = pod.matrices_dict["2_0myy"] + pod._translation_hhh + pod._translation_qqq #480
_matrix_3_xxx_3q3q3q_miss = pod.matrices_dict["3_xxx"] + pod._translation_hhh + pod._translation_qqq #481
_matrix_3_xmxmx_3q3q3q_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hhh + pod._translation_qqq #482
_matrix_3_mxxmx_3q3q3q_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hhh + pod._translation_qqq #483
_matrix_3_mxmxx_3q3q3q_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hhh + pod._translation_qqq #484
_matrix_m3_xxx_3q3q3q_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hhh + pod._translation_qqq #485
_matrix_m3_xmxmx_3q3q3q_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hhh + pod._translation_qqq #486
_matrix_m3_mxxmx_3q3q3q_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hhh + pod._translation_qqq #487
_matrix_m3_mxmxx_3q3q3q_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hhh + pod._translation_qqq #488
_matrix_4_x00_3q3q3q_miss = pod.matrices_dict["4_x00"] + pod._translation_hhh + pod._translation_qqq #489
_matrix_4_0y0_3q3q3q_miss = pod.matrices_dict["4_0y0"] + pod._translation_hhh + pod._translation_qqq #490
_matrix_4_00z_3q3q3q_miss = pod.matrices_dict["4_00z"] + pod._translation_hhh + pod._translation_qqq #491
_matrix_m4_x00_3q3q3q_miss = pod.matrices_dict["-4_x00"] + pod._translation_hhh + pod._translation_qqq #492
_matrix_m4_0y0_3q3q3q_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hhh + pod._translation_qqq #493
_matrix_m4_00z_3q3q3q_miss = pod.matrices_dict["-4_00z"] + pod._translation_hhh + pod._translation_qqq #494
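# Hedged sketch (not part of the original module): every "*_miss" assignment
# above follows the same pattern -- look up a point-group matrix in
# pod.matrices_dict and add two centering translations.  Assuming the "+"
# composition and the pod._translation_* attributes behave exactly as used
# above, the same objects could be produced by a loop instead of hundreds of
# explicit variables; the key/label naming below is illustrative only (note
# the dict uses "-4_x00" where the variable names use "m4_x00"), so the
# sketch is left commented out rather than executed.
#
#     centering_pairs = {
#         "3q3qq":  (pod._translation_hh0, pod._translation_qqq),
#         "3q3q3q": (pod._translation_hhh, pod._translation_qqq),
#     }
#     generated_missing = {
#         (op_name, tag): matrix + t1 + t2
#         for tag, (t1, t2) in centering_pairs.items()
#         for op_name, matrix in pod.matrices_dict.items()
#     }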
all_missing_matrices = [
_matrix_inv_000_h00_miss,
_matrix_m_0yz_h00_miss,
_matrix_m_x0z_h00_miss,
_matrix_m_xy0_h00_miss,
_matrix_m_xmxz_h00_miss,
_matrix_m_xymy_h00_miss,
_matrix_m_xymx_h00_miss,
_matrix_m_xyx_h00_miss,
_matrix_m_xxz_h00_miss,
_matrix_m_xyy_h00_miss,
_matrix_2_x00_h00_miss,
_matrix_2_0y0_h00_miss,
_matrix_2_00z_h00_miss,
_matrix_2_xx0_h00_miss,
_matrix_2_x0x_h00_miss,
_matrix_2_0yy_h00_miss,
_matrix_2_xmx0_h00_miss,
_matrix_2_mx0x_h00_miss,
_matrix_2_0myy_h00_miss,
_matrix_3_xxx_h00_miss,
_matrix_3_xmxmx_h00_miss,
_matrix_3_mxxmx_h00_miss,
_matrix_3_mxmxx_h00_miss,
_matrix_m3_xxx_h00_miss,
_matrix_m3_xmxmx_h00_miss,
_matrix_m3_mxxmx_h00_miss,
_matrix_m3_mxmxx_h00_miss,
_matrix_4_x00_h00_miss,
_matrix_4_0y0_h00_miss,
_matrix_4_00z_h00_miss,
_matrix_m4_x00_h00_miss,
_matrix_m4_0y0_h00_miss,
_matrix_m4_00z_h00_miss,
_matrix_inv_000_0h0_miss,
_matrix_m_0yz_0h0_miss,
_matrix_m_x0z_0h0_miss,
_matrix_m_xy0_0h0_miss,
_matrix_m_xmxz_0h0_miss,
_matrix_m_xymy_0h0_miss,
_matrix_m_xymx_0h0_miss,
_matrix_m_xyx_0h0_miss,
_matrix_m_xxz_0h0_miss,
_matrix_m_xyy_0h0_miss,
_matrix_2_x00_0h0_miss,
_matrix_2_0y0_0h0_miss,
_matrix_2_00z_0h0_miss,
_matrix_2_xx0_0h0_miss,
_matrix_2_x0x_0h0_miss,
_matrix_2_0yy_0h0_miss,
_matrix_2_xmx0_0h0_miss,
_matrix_2_mx0x_0h0_miss,
_matrix_2_0myy_0h0_miss,
_matrix_3_xxx_0h0_miss,
_matrix_3_xmxmx_0h0_miss,
_matrix_3_mxxmx_0h0_miss,
_matrix_3_mxmxx_0h0_miss,
_matrix_m3_xxx_0h0_miss,
_matrix_m3_xmxmx_0h0_miss,
_matrix_m3_mxxmx_0h0_miss,
_matrix_m3_mxmxx_0h0_miss,
_matrix_4_x00_0h0_miss,
_matrix_4_0y0_0h0_miss,
_matrix_4_00z_0h0_miss,
_matrix_m4_x00_0h0_miss,
_matrix_m4_0y0_0h0_miss,
_matrix_m4_00z_0h0_miss,
_matrix_inv_000_00h_miss,
_matrix_m_0yz_00h_miss,
_matrix_m_x0z_00h_miss,
_matrix_m_xy0_00h_miss,
_matrix_m_xmxz_00h_miss,
_matrix_m_xymy_00h_miss,
_matrix_m_xymx_00h_miss,
_matrix_m_xyx_00h_miss,
_matrix_m_xxz_00h_miss,
_matrix_m_xyy_00h_miss,
_matrix_2_x00_00h_miss,
_matrix_2_0y0_00h_miss,
_matrix_2_00z_00h_miss,
_matrix_2_xx0_00h_miss,
_matrix_2_x0x_00h_miss,
_matrix_2_0yy_00h_miss,
_matrix_2_xmx0_00h_miss,
_matrix_2_mx0x_00h_miss,
_matrix_2_0myy_00h_miss,
_matrix_3_xxx_00h_miss,
_matrix_3_xmxmx_00h_miss,
_matrix_3_mxxmx_00h_miss,
_matrix_3_mxmxx_00h_miss,
_matrix_m3_xxx_00h_miss,
_matrix_m3_xmxmx_00h_miss,
_matrix_m3_mxxmx_00h_miss,
_matrix_m3_mxmxx_00h_miss,
_matrix_4_x00_00h_miss,
_matrix_4_0y0_00h_miss,
_matrix_4_00z_00h_miss,
_matrix_m4_x00_00h_miss,
_matrix_m4_0y0_00h_miss,
_matrix_m4_00z_00h_miss,
_matrix_inv_000_0hh_miss,
_matrix_m_0yz_0hh_miss,
_matrix_m_x0z_0hh_miss,
_matrix_m_xy0_0hh_miss,
_matrix_m_xmxz_0hh_miss,
_matrix_m_xymy_0hh_miss,
_matrix_m_xymx_0hh_miss,
_matrix_m_xyx_0hh_miss,
_matrix_m_xxz_0hh_miss,
_matrix_m_xyy_0hh_miss,
_matrix_2_x00_0hh_miss,
_matrix_2_0y0_0hh_miss,
_matrix_2_00z_0hh_miss,
_matrix_2_xx0_0hh_miss,
_matrix_2_x0x_0hh_miss,
_matrix_2_0yy_0hh_miss,
_matrix_2_xmx0_0hh_miss,
_matrix_2_mx0x_0hh_miss,
_matrix_2_0myy_0hh_miss,
_matrix_3_xxx_0hh_miss,
_matrix_3_xmxmx_0hh_miss,
_matrix_3_mxxmx_0hh_miss,
_matrix_3_mxmxx_0hh_miss,
_matrix_m3_xxx_0hh_miss,
_matrix_m3_xmxmx_0hh_miss,
_matrix_m3_mxxmx_0hh_miss,
_matrix_m3_mxmxx_0hh_miss,
_matrix_4_x00_0hh_miss,
_matrix_4_0y0_0hh_miss,
_matrix_4_00z_0hh_miss,
_matrix_m4_x00_0hh_miss,
_matrix_m4_0y0_0hh_miss,
_matrix_m4_00z_0hh_miss,
_matrix_inv_000_h0h_miss,
_matrix_m_0yz_h0h_miss,
_matrix_m_x0z_h0h_miss,
_matrix_m_xy0_h0h_miss,
_matrix_m_xmxz_h0h_miss,
_matrix_m_xymy_h0h_miss,
_matrix_m_xymx_h0h_miss,
_matrix_m_xyx_h0h_miss,
_matrix_m_xxz_h0h_miss,
_matrix_m_xyy_h0h_miss,
_matrix_2_x00_h0h_miss,
_matrix_2_0y0_h0h_miss,
_matrix_2_00z_h0h_miss,
_matrix_2_xx0_h0h_miss,
_matrix_2_x0x_h0h_miss,
_matrix_2_0yy_h0h_miss,
_matrix_2_xmx0_h0h_miss,
_matrix_2_mx0x_h0h_miss,
_matrix_2_0myy_h0h_miss,
_matrix_3_xxx_h0h_miss,
_matrix_3_xmxmx_h0h_miss,
_matrix_3_mxxmx_h0h_miss,
_matrix_3_mxmxx_h0h_miss,
_matrix_m3_xxx_h0h_miss,
_matrix_m3_xmxmx_h0h_miss,
_matrix_m3_mxxmx_h0h_miss,
_matrix_m3_mxmxx_h0h_miss,
_matrix_4_x00_h0h_miss,
_matrix_4_0y0_h0h_miss,
_matrix_4_00z_h0h_miss,
_matrix_m4_x00_h0h_miss,
_matrix_m4_0y0_h0h_miss,
_matrix_m4_00z_h0h_miss,
_matrix_inv_000_hh0_miss,
_matrix_m_0yz_hh0_miss,
_matrix_m_x0z_hh0_miss,
_matrix_m_xy0_hh0_miss,
_matrix_m_xmxz_hh0_miss,
_matrix_m_xymy_hh0_miss,
_matrix_m_xymx_hh0_miss,
_matrix_m_xyx_hh0_miss,
_matrix_m_xxz_hh0_miss,
_matrix_m_xyy_hh0_miss,
_matrix_2_x00_hh0_miss,
_matrix_2_0y0_hh0_miss,
_matrix_2_00z_hh0_miss,
_matrix_2_xx0_hh0_miss,
_matrix_2_x0x_hh0_miss,
_matrix_2_0yy_hh0_miss,
_matrix_2_xmx0_hh0_miss,
_matrix_2_mx0x_hh0_miss,
_matrix_2_0myy_hh0_miss,
_matrix_3_xxx_hh0_miss,
_matrix_3_xmxmx_hh0_miss,
_matrix_3_mxxmx_hh0_miss,
_matrix_3_mxmxx_hh0_miss,
_matrix_m3_xxx_hh0_miss,
_matrix_m3_xmxmx_hh0_miss,
_matrix_m3_mxxmx_hh0_miss,
_matrix_m3_mxmxx_hh0_miss,
_matrix_4_x00_hh0_miss,
_matrix_4_0y0_hh0_miss,
_matrix_4_00z_hh0_miss,
_matrix_m4_x00_hh0_miss,
_matrix_m4_0y0_hh0_miss,
_matrix_m4_00z_hh0_miss,
_matrix_inv_000_hhh_miss,
_matrix_m_0yz_hhh_miss,
_matrix_m_x0z_hhh_miss,
_matrix_m_xy0_hhh_miss,
_matrix_m_xmxz_hhh_miss,
_matrix_m_xymy_hhh_miss,
_matrix_m_xymx_hhh_miss,
_matrix_m_xyx_hhh_miss,
_matrix_m_xxz_hhh_miss,
_matrix_m_xyy_hhh_miss,
_matrix_2_x00_hhh_miss,
_matrix_2_0y0_hhh_miss,
_matrix_2_00z_hhh_miss,
_matrix_2_xx0_hhh_miss,
_matrix_2_x0x_hhh_miss,
_matrix_2_0yy_hhh_miss,
_matrix_2_xmx0_hhh_miss,
_matrix_2_mx0x_hhh_miss,
_matrix_2_0myy_hhh_miss,
_matrix_3_xxx_hhh_miss,
_matrix_3_xmxmx_hhh_miss,
_matrix_3_mxxmx_hhh_miss,
_matrix_3_mxmxx_hhh_miss,
_matrix_m3_xxx_hhh_miss,
_matrix_m3_xmxmx_hhh_miss,
_matrix_m3_mxxmx_hhh_miss,
_matrix_m3_mxmxx_hhh_miss,
_matrix_4_x00_hhh_miss,
_matrix_4_0y0_hhh_miss,
_matrix_4_00z_hhh_miss,
_matrix_m4_x00_hhh_miss,
_matrix_m4_0y0_hhh_miss,
_matrix_m4_00z_hhh_miss,
_matrix_inv_000_qqq_miss,
_matrix_m_0yz_qqq_miss,
_matrix_m_x0z_qqq_miss,
_matrix_m_xy0_qqq_miss,
_matrix_m_xmxz_qqq_miss,
_matrix_m_xymy_qqq_miss,
_matrix_m_xymx_qqq_miss,
_matrix_m_xyx_qqq_miss,
_matrix_m_xxz_qqq_miss,
_matrix_m_xyy_qqq_miss,
_matrix_2_x00_qqq_miss,
_matrix_2_0y0_qqq_miss,
_matrix_2_00z_qqq_miss,
_matrix_2_xx0_qqq_miss,
_matrix_2_x0x_qqq_miss,
_matrix_2_0yy_qqq_miss,
_matrix_2_xmx0_qqq_miss,
_matrix_2_mx0x_qqq_miss,
_matrix_2_0myy_qqq_miss,
_matrix_3_xxx_qqq_miss,
_matrix_3_xmxmx_qqq_miss,
_matrix_3_mxxmx_qqq_miss,
_matrix_3_mxmxx_qqq_miss,
_matrix_m3_xxx_qqq_miss,
_matrix_m3_xmxmx_qqq_miss,
_matrix_m3_mxxmx_qqq_miss,
_matrix_m3_mxmxx_qqq_miss,
_matrix_4_x00_qqq_miss,
_matrix_4_0y0_qqq_miss,
_matrix_4_00z_qqq_miss,
_matrix_m4_x00_qqq_miss,
_matrix_m4_0y0_qqq_miss,
_matrix_m4_00z_qqq_miss,
_matrix_inv_000_3qqq_miss,
_matrix_m_0yz_3qqq_miss,
_matrix_m_x0z_3qqq_miss,
_matrix_m_xy0_3qqq_miss,
_matrix_m_xmxz_3qqq_miss,
_matrix_m_xymy_3qqq_miss,
_matrix_m_xymx_3qqq_miss,
_matrix_m_xyx_3qqq_miss,
_matrix_m_xxz_3qqq_miss,
_matrix_m_xyy_3qqq_miss,
_matrix_2_x00_3qqq_miss,
_matrix_2_0y0_3qqq_miss,
_matrix_2_00z_3qqq_miss,
_matrix_2_xx0_3qqq_miss,
_matrix_2_x0x_3qqq_miss,
_matrix_2_0yy_3qqq_miss,
_matrix_2_xmx0_3qqq_miss,
_matrix_2_mx0x_3qqq_miss,
_matrix_2_0myy_3qqq_miss,
_matrix_3_xxx_3qqq_miss,
_matrix_3_xmxmx_3qqq_miss,
_matrix_3_mxxmx_3qqq_miss,
_matrix_3_mxmxx_3qqq_miss,
_matrix_m3_xxx_3qqq_miss,
_matrix_m3_xmxmx_3qqq_miss,
_matrix_m3_mxxmx_3qqq_miss,
_matrix_m3_mxmxx_3qqq_miss,
_matrix_4_x00_3qqq_miss,
_matrix_4_0y0_3qqq_miss,
_matrix_4_00z_3qqq_miss,
_matrix_m4_x00_3qqq_miss,
_matrix_m4_0y0_3qqq_miss,
_matrix_m4_00z_3qqq_miss,
_matrix_inv_000_q3qq_miss,
_matrix_m_0yz_q3qq_miss,
_matrix_m_x0z_q3qq_miss,
_matrix_m_xy0_q3qq_miss,
_matrix_m_xmxz_q3qq_miss,
_matrix_m_xymy_q3qq_miss,
_matrix_m_xymx_q3qq_miss,
_matrix_m_xyx_q3qq_miss,
_matrix_m_xxz_q3qq_miss,
_matrix_m_xyy_q3qq_miss,
_matrix_2_x00_q3qq_miss,
_matrix_2_0y0_q3qq_miss,
_matrix_2_00z_q3qq_miss,
_matrix_2_xx0_q3qq_miss,
_matrix_2_x0x_q3qq_miss,
_matrix_2_0yy_q3qq_miss,
_matrix_2_xmx0_q3qq_miss,
_matrix_2_mx0x_q3qq_miss,
_matrix_2_0myy_q3qq_miss,
_matrix_3_xxx_q3qq_miss,
_matrix_3_xmxmx_q3qq_miss,
_matrix_3_mxxmx_q3qq_miss,
_matrix_3_mxmxx_q3qq_miss,
_matrix_m3_xxx_q3qq_miss,
_matrix_m3_xmxmx_q3qq_miss,
_matrix_m3_mxxmx_q3qq_miss,
_matrix_m3_mxmxx_q3qq_miss,
_matrix_4_x00_q3qq_miss,
_matrix_4_0y0_q3qq_miss,
_matrix_4_00z_q3qq_miss,
_matrix_m4_x00_q3qq_miss,
_matrix_m4_0y0_q3qq_miss,
_matrix_m4_00z_q3qq_miss,
_matrix_inv_000_qq3q_miss,
_matrix_m_0yz_qq3q_miss,
_matrix_m_x0z_qq3q_miss,
_matrix_m_xy0_qq3q_miss,
_matrix_m_xmxz_qq3q_miss,
_matrix_m_xymy_qq3q_miss,
_matrix_m_xymx_qq3q_miss,
_matrix_m_xyx_qq3q_miss,
_matrix_m_xxz_qq3q_miss,
_matrix_m_xyy_qq3q_miss,
_matrix_2_x00_qq3q_miss,
_matrix_2_0y0_qq3q_miss,
_matrix_2_00z_qq3q_miss,
_matrix_2_xx0_qq3q_miss,
_matrix_2_x0x_qq3q_miss,
_matrix_2_0yy_qq3q_miss,
_matrix_2_xmx0_qq3q_miss,
_matrix_2_mx0x_qq3q_miss,
_matrix_2_0myy_qq3q_miss,
_matrix_3_xxx_qq3q_miss,
_matrix_3_xmxmx_qq3q_miss,
_matrix_3_mxxmx_qq3q_miss,
_matrix_3_mxmxx_qq3q_miss,
_matrix_m3_xxx_qq3q_miss,
_matrix_m3_xmxmx_qq3q_miss,
_matrix_m3_mxxmx_qq3q_miss,
_matrix_m3_mxmxx_qq3q_miss,
_matrix_4_x00_qq3q_miss,
_matrix_4_0y0_qq3q_miss,
_matrix_4_00z_qq3q_miss,
_matrix_m4_x00_qq3q_miss,
_matrix_m4_0y0_qq3q_miss,
_matrix_m4_00z_qq3q_miss,
_matrix_inv_000_q3q3q_miss,
_matrix_m_0yz_q3q3q_miss,
_matrix_m_x0z_q3q3q_miss,
_matrix_m_xy0_q3q3q_miss,
_matrix_m_xmxz_q3q3q_miss,
_matrix_m_xymy_q3q3q_miss,
_matrix_m_xymx_q3q3q_miss,
_matrix_m_xyx_q3q3q_miss,
_matrix_m_xxz_q3q3q_miss,
_matrix_m_xyy_q3q3q_miss,
_matrix_2_x00_q3q3q_miss,
_matrix_2_0y0_q3q3q_miss,
_matrix_2_00z_q3q3q_miss,
_matrix_2_xx0_q3q3q_miss,
_matrix_2_x0x_q3q3q_miss,
_matrix_2_0yy_q3q3q_miss,
_matrix_2_xmx0_q3q3q_miss,
_matrix_2_mx0x_q3q3q_miss,
_matrix_2_0myy_q3q3q_miss,
_matrix_3_xxx_q3q3q_miss,
_matrix_3_xmxmx_q3q3q_miss,
_matrix_3_mxxmx_q3q3q_miss,
_matrix_3_mxmxx_q3q3q_miss,
_matrix_m3_xxx_q3q3q_miss,
_matrix_m3_xmxmx_q3q3q_miss,
_matrix_m3_mxxmx_q3q3q_miss,
_matrix_m3_mxmxx_q3q3q_miss,
_matrix_4_x00_q3q3q_miss,
_matrix_4_0y0_q3q3q_miss,
_matrix_4_00z_q3q3q_miss,
_matrix_m4_x00_q3q3q_miss,
_matrix_m4_0y0_q3q3q_miss,
_matrix_m4_00z_q3q3q_miss,
_matrix_inv_000_3qq3q_miss,
_matrix_m_0yz_3qq3q_miss,
_matrix_m_x0z_3qq3q_miss,
_matrix_m_xy0_3qq3q_miss,
_matrix_m_xmxz_3qq3q_miss,
_matrix_m_xymy_3qq3q_miss,
_matrix_m_xymx_3qq3q_miss,
_matrix_m_xyx_3qq3q_miss,
_matrix_m_xxz_3qq3q_miss,
_matrix_m_xyy_3qq3q_miss,
_matrix_2_x00_3qq3q_miss,
_matrix_2_0y0_3qq3q_miss,
_matrix_2_00z_3qq3q_miss,
_matrix_2_xx0_3qq3q_miss,
_matrix_2_x0x_3qq3q_miss,
_matrix_2_0yy_3qq3q_miss,
_matrix_2_xmx0_3qq3q_miss,
_matrix_2_mx0x_3qq3q_miss,
_matrix_2_0myy_3qq3q_miss,
_matrix_3_xxx_3qq3q_miss,
_matrix_3_xmxmx_3qq3q_miss,
_matrix_3_mxxmx_3qq3q_miss,
_matrix_3_mxmxx_3qq3q_miss,
_matrix_m3_xxx_3qq3q_miss,
_matrix_m3_xmxmx_3qq3q_miss,
_matrix_m3_mxxmx_3qq3q_miss,
_matrix_m3_mxmxx_3qq3q_miss,
_matrix_4_x00_3qq3q_miss,
_matrix_4_0y0_3qq3q_miss,
_matrix_4_00z_3qq3q_miss,
_matrix_m4_x00_3qq3q_miss,
_matrix_m4_0y0_3qq3q_miss,
_matrix_m4_00z_3qq3q_miss,
_matrix_inv_000_3q3qq_miss,
_matrix_m_0yz_3q3qq_miss,
_matrix_m_x0z_3q3qq_miss,
_matrix_m_xy0_3q3qq_miss,
_matrix_m_xmxz_3q3qq_miss,
_matrix_m_xymy_3q3qq_miss,
_matrix_m_xymx_3q3qq_miss,
_matrix_m_xyx_3q3qq_miss,
_matrix_m_xxz_3q3qq_miss,
_matrix_m_xyy_3q3qq_miss,
_matrix_2_x00_3q3qq_miss,
_matrix_2_0y0_3q3qq_miss,
_matrix_2_00z_3q3qq_miss,
_matrix_2_xx0_3q3qq_miss,
_matrix_2_x0x_3q3qq_miss,
_matrix_2_0yy_3q3qq_miss,
_matrix_2_xmx0_3q3qq_miss,
_matrix_2_mx0x_3q3qq_miss,
_matrix_2_0myy_3q3qq_miss,
_matrix_3_xxx_3q3qq_miss,
_matrix_3_xmxmx_3q3qq_miss,
_matrix_3_mxxmx_3q3qq_miss,
_matrix_3_mxmxx_3q3qq_miss,
_matrix_m3_xxx_3q3qq_miss,
_matrix_m3_xmxmx_3q3qq_miss,
_matrix_m3_mxxmx_3q3qq_miss,
_matrix_m3_mxmxx_3q3qq_miss,
_matrix_4_x00_3q3qq_miss,
_matrix_4_0y0_3q3qq_miss,
_matrix_4_00z_3q3qq_miss,
_matrix_m4_x00_3q3qq_miss,
_matrix_m4_0y0_3q3qq_miss,
_matrix_m4_00z_3q3qq_miss,
_matrix_inv_000_3q3q3q_miss,
_matrix_m_0yz_3q3q3q_miss,
_matrix_m_x0z_3q3q3q_miss,
_matrix_m_xy0_3q3q3q_miss,
_matrix_m_xmxz_3q3q3q_miss,
_matrix_m_xymy_3q3q3q_miss,
_matrix_m_xymx_3q3q3q_miss,
_matrix_m_xyx_3q3q3q_miss,
_matrix_m_xxz_3q3q3q_miss,
_matrix_m_xyy_3q3q3q_miss,
_matrix_2_x00_3q3q3q_miss,
_matrix_2_0y0_3q3q3q_miss,
_matrix_2_00z_3q3q3q_miss,
_matrix_2_xx0_3q3q3q_miss,
_matrix_2_x0x_3q3q3q_miss,
_matrix_2_0yy_3q3q3q_miss,
_matrix_2_xmx0_3q3q3q_miss,
_matrix_2_mx0x_3q3q3q_miss,
_matrix_2_0myy_3q3q3q_miss,
_matrix_3_xxx_3q3q3q_miss,
_matrix_3_xmxmx_3q3q3q_miss,
_matrix_3_mxxmx_3q3q3q_miss,
_matrix_3_mxmxx_3q3q3q_miss,
_matrix_m3_xxx_3q3q3q_miss,
_matrix_m3_xmxmx_3q3q3q_miss,
_matrix_m3_mxxmx_3q3q3q_miss,
_matrix_m3_mxmxx_3q3q3q_miss,
_matrix_4_x00_3q3q3q_miss,
_matrix_4_0y0_3q3q3q_miss,
_matrix_4_00z_3q3q3q_miss,
_matrix_m4_x00_3q3q3q_miss,
_matrix_m4_0y0_3q3q3q_miss,
_matrix_m4_00z_3q3q3q_miss,
]
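# Hedged sketch (not part of the original module): all_missing_matrices above
# and all_missing_matrices_labels below are maintained as parallel,
# index-aligned lists.  Assuming that alignment holds, pairing them once makes
# look-ups by label direct and removes the risk of the two lists drifting
# apart; left commented out here because the label list is defined just below.
#
#     missing_by_label = dict(zip(all_missing_matrices_labels,
#                                 all_missing_matrices))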
all_missing_matrices_labels = [
"miss_inv_000_h00_miss",
"miss_m_0yz_h00_miss",
"miss_m_x0z_h00_miss",
"miss_m_xy0_h00_miss",
"miss_m_xmxz_h00_miss",
"miss_m_xymy_h00_miss",
"miss_m_xymx_h00_miss",
"miss_m_xyx_h00_miss",
"miss_m_xxz_h00_miss",
"miss_m_xyy_h00_miss",
"miss_2_x00_h00_miss",
"miss_2_0y0_h00_miss",
"miss_2_00z_h00_miss",
"miss_2_xx0_h00_miss",
"miss_2_x0x_h00_miss",
"miss_2_0yy_h00_miss",
"miss_2_xmx0_h00_miss",
"miss_2_mx0x_h00_miss",
"miss_2_0myy_h00_miss",
"miss_3_xxx_h00_miss",
"miss_3_xmxmx_h00_miss",
"miss_3_mxxmx_h00_miss",
"miss_3_mxmxx_h00_miss",
"miss_m3_xxx_h00_miss",
"miss_m3_xmxmx_h00_miss",
"miss_m3_mxxmx_h00_miss",
"miss_m3_mxmxx_h00_miss",
"miss_4_x00_h00_miss",
"miss_4_0y0_h00_miss",
"miss_4_00z_h00_miss",
"miss_m4_x00_h00_miss",
"miss_m4_0y0_h00_miss",
"miss_m4_00z_h00_miss",
"miss_inv_000_0h0_miss",
"miss_m_0yz_0h0_miss",
"miss_m_x0z_0h0_miss",
"miss_m_xy0_0h0_miss",
"miss_m_xmxz_0h0_miss",
"miss_m_xymy_0h0_miss",
"miss_m_xymx_0h0_miss",
"miss_m_xyx_0h0_miss",
"miss_m_xxz_0h0_miss",
"miss_m_xyy_0h0_miss",
"miss_2_x00_0h0_miss",
"miss_2_0y0_0h0_miss",
"miss_2_00z_0h0_miss",
"miss_2_xx0_0h0_miss",
"miss_2_x0x_0h0_miss",
"miss_2_0yy_0h0_miss",
"miss_2_xmx0_0h0_miss",
"miss_2_mx0x_0h0_miss",
"miss_2_0myy_0h0_miss",
"miss_3_xxx_0h0_miss",
"miss_3_xmxmx_0h0_miss",
"miss_3_mxxmx_0h0_miss",
"miss_3_mxmxx_0h0_miss",
"miss_m3_xxx_0h0_miss",
"miss_m3_xmxmx_0h0_miss",
"miss_m3_mxxmx_0h0_miss",
"miss_m3_mxmxx_0h0_miss",
"miss_4_x00_0h0_miss",
"miss_4_0y0_0h0_miss",
"miss_4_00z_0h0_miss",
"miss_m4_x00_0h0_miss",
"miss_m4_0y0_0h0_miss",
"miss_m4_00z_0h0_miss",
"miss_inv_000_00h_miss",
"miss_m_0yz_00h_miss",
"miss_m_x0z_00h_miss",
"miss_m_xy0_00h_miss",
"miss_m_xmxz_00h_miss",
"miss_m_xymy_00h_miss",
"miss_m_xymx_00h_miss",
"miss_m_xyx_00h_miss",
"miss_m_xxz_00h_miss",
"miss_m_xyy_00h_miss",
"miss_2_x00_00h_miss",
"miss_2_0y0_00h_miss",
"miss_2_00z_00h_miss",
"miss_2_xx0_00h_miss",
"miss_2_x0x_00h_miss",
"miss_2_0yy_00h_miss",
"miss_2_xmx0_00h_miss",
"miss_2_mx0x_00h_miss",
"miss_2_0myy_00h_miss",
"miss_3_xxx_00h_miss",
"miss_3_xmxmx_00h_miss",
"miss_3_mxxmx_00h_miss",
"miss_3_mxmxx_00h_miss",
"miss_m3_xxx_00h_miss",
"miss_m3_xmxmx_00h_miss",
"miss_m3_mxxmx_00h_miss",
"miss_m3_mxmxx_00h_miss",
"miss_4_x00_00h_miss",
"miss_4_0y0_00h_miss",
"miss_4_00z_00h_miss",
"miss_m4_x00_00h_miss",
"miss_m4_0y0_00h_miss",
"miss_m4_00z_00h_miss",
"miss_inv_000_0hh_miss",
"miss_m_0yz_0hh_miss",
"miss_m_x0z_0hh_miss",
"miss_m_xy0_0hh_miss",
"miss_m_xmxz_0hh_miss",
"miss_m_xymy_0hh_miss",
"miss_m_xymx_0hh_miss",
"miss_m_xyx_0hh_miss",
"miss_m_xxz_0hh_miss",
"miss_m_xyy_0hh_miss",
"miss_2_x00_0hh_miss",
"miss_2_0y0_0hh_miss",
"miss_2_00z_0hh_miss",
"miss_2_xx0_0hh_miss",
"miss_2_x0x_0hh_miss",
"miss_2_0yy_0hh_miss",
"miss_2_xmx0_0hh_miss",
"miss_2_mx0x_0hh_miss",
"miss_2_0myy_0hh_miss",
"miss_3_xxx_0hh_miss",
"miss_3_xmxmx_0hh_miss",
"miss_3_mxxmx_0hh_miss",
"miss_3_mxmxx_0hh_miss",
"miss_m3_xxx_0hh_miss",
"miss_m3_xmxmx_0hh_miss",
"miss_m3_mxxmx_0hh_miss",
"miss_m3_mxmxx_0hh_miss",
"miss_4_x00_0hh_miss",
"miss_4_0y0_0hh_miss",
"miss_4_00z_0hh_miss",
"miss_m4_x00_0hh_miss",
"miss_m4_0y0_0hh_miss",
"miss_m4_00z_0hh_miss",
"miss_inv_000_h0h_miss",
"miss_m_0yz_h0h_miss",
"miss_m_x0z_h0h_miss",
"miss_m_xy0_h0h_miss",
"miss_m_xmxz_h0h_miss",
"miss_m_xymy_h0h_miss",
"miss_m_xymx_h0h_miss",
"miss_m_xyx_h0h_miss",
"miss_m_xxz_h0h_miss",
"miss_m_xyy_h0h_miss",
"miss_2_x00_h0h_miss",
"miss_2_0y0_h0h_miss",
"miss_2_00z_h0h_miss",
"miss_2_xx0_h0h_miss",
"miss_2_x0x_h0h_miss",
"miss_2_0yy_h0h_miss",
"miss_2_xmx0_h0h_miss",
"miss_2_mx0x_h0h_miss",
"miss_2_0myy_h0h_miss",
"miss_3_xxx_h0h_miss",
"miss_3_xmxmx_h0h_miss",
"miss_3_mxxmx_h0h_miss",
"miss_3_mxmxx_h0h_miss",
"miss_m3_xxx_h0h_miss",
"miss_m3_xmxmx_h0h_miss",
"miss_m3_mxxmx_h0h_miss",
"miss_m3_mxmxx_h0h_miss",
"miss_4_x00_h0h_miss",
"miss_4_0y0_h0h_miss",
"miss_4_00z_h0h_miss",
"miss_m4_x00_h0h_miss",
"miss_m4_0y0_h0h_miss",
"miss_m4_00z_h0h_miss",
"miss_inv_000_hh0_miss",
"miss_m_0yz_hh0_miss",
"miss_m_x0z_hh0_miss",
"miss_m_xy0_hh0_miss",
"miss_m_xmxz_hh0_miss",
"miss_m_xymy_hh0_miss",
"miss_m_xymx_hh0_miss",
"miss_m_xyx_hh0_miss",
"miss_m_xxz_hh0_miss",
"miss_m_xyy_hh0_miss",
"miss_2_x00_hh0_miss",
"miss_2_0y0_hh0_miss",
"miss_2_00z_hh0_miss",
"miss_2_xx0_hh0_miss",
"miss_2_x0x_hh0_miss",
"miss_2_0yy_hh0_miss",
"miss_2_xmx0_hh0_miss",
"miss_2_mx0x_hh0_miss",
"miss_2_0myy_hh0_miss",
"miss_3_xxx_hh0_miss",
"miss_3_xmxmx_hh0_miss",
"miss_3_mxxmx_hh0_miss",
"miss_3_mxmxx_hh0_miss",
"miss_m3_xxx_hh0_miss",
"miss_m3_xmxmx_hh0_miss",
"miss_m3_mxxmx_hh0_miss",
"miss_m3_mxmxx_hh0_miss",
"miss_4_x00_hh0_miss",
"miss_4_0y0_hh0_miss",
"miss_4_00z_hh0_miss",
"miss_m4_x00_hh0_miss",
"miss_m4_0y0_hh0_miss",
"miss_m4_00z_hh0_miss",
"miss_inv_000_hhh_miss",
"miss_m_0yz_hhh_miss",
"miss_m_x0z_hhh_miss",
"miss_m_xy0_hhh_miss",
"miss_m_xmxz_hhh_miss",
"miss_m_xymy_hhh_miss",
"miss_m_xymx_hhh_miss",
"miss_m_xyx_hhh_miss",
"miss_m_xxz_hhh_miss",
"miss_m_xyy_hhh_miss",
"miss_2_x00_hhh_miss",
"miss_2_0y0_hhh_miss",
"miss_2_00z_hhh_miss",
"miss_2_xx0_hhh_miss",
"miss_2_x0x_hhh_miss",
"miss_2_0yy_hhh_miss",
"miss_2_xmx0_hhh_miss",
"miss_2_mx0x_hhh_miss",
"miss_2_0myy_hhh_miss",
"miss_3_xxx_hhh_miss",
"miss_3_xmxmx_hhh_miss",
"miss_3_mxxmx_hhh_miss",
"miss_3_mxmxx_hhh_miss",
"miss_m3_xxx_hhh_miss",
"miss_m3_xmxmx_hhh_miss",
"miss_m3_mxxmx_hhh_miss",
"miss_m3_mxmxx_hhh_miss",
"miss_4_x00_hhh_miss",
"miss_4_0y0_hhh_miss",
"miss_4_00z_hhh_miss",
"miss_m4_x00_hhh_miss",
"miss_m4_0y0_hhh_miss",
"miss_m4_00z_hhh_miss",
"miss_inv_000_qqq_miss",
"miss_m_0yz_qqq_miss",
"miss_m_x0z_qqq_miss",
"miss_m_xy0_qqq_miss",
"miss_m_xmxz_qqq_miss",
"miss_m_xymy_qqq_miss",
"miss_m_xymx_qqq_miss",
"miss_m_xyx_qqq_miss",
"miss_m_xxz_qqq_miss",
"miss_m_xyy_qqq_miss",
"miss_2_x00_qqq_miss",
"miss_2_0y0_qqq_miss",
"miss_2_00z_qqq_miss",
"miss_2_xx0_qqq_miss",
"miss_2_x0x_qqq_miss",
"miss_2_0yy_qqq_miss",
"miss_2_xmx0_qqq_miss",
"miss_2_mx0x_qqq_miss",
"miss_2_0myy_qqq_miss",
"miss_3_xxx_qqq_miss",
"miss_3_xmxmx_qqq_miss",
"miss_3_mxxmx_qqq_miss",
"miss_3_mxmxx_qqq_miss",
"miss_m3_xxx_qqq_miss",
"miss_m3_xmxmx_qqq_miss",
"miss_m3_mxxmx_qqq_miss",
"miss_m3_mxmxx_qqq_miss",
"miss_4_x00_qqq_miss",
"miss_4_0y0_qqq_miss",
"miss_4_00z_qqq_miss",
"miss_m4_x00_qqq_miss",
"miss_m4_0y0_qqq_miss",
"miss_m4_00z_qqq_miss",
"miss_inv_000_3qqq_miss",
"miss_m_0yz_3qqq_miss",
"miss_m_x0z_3qqq_miss",
"miss_m_xy0_3qqq_miss",
"miss_m_xmxz_3qqq_miss",
"miss_m_xymy_3qqq_miss",
"miss_m_xymx_3qqq_miss",
"miss_m_xyx_3qqq_miss",
"miss_m_xxz_3qqq_miss",
"miss_m_xyy_3qqq_miss",
"miss_2_x00_3qqq_miss",
"miss_2_0y0_3qqq_miss",
"miss_2_00z_3qqq_miss",
"miss_2_xx0_3qqq_miss",
"miss_2_x0x_3qqq_miss",
"miss_2_0yy_3qqq_miss",
"miss_2_xmx0_3qqq_miss",
"miss_2_mx0x_3qqq_miss",
"miss_2_0myy_3qqq_miss",
"miss_3_xxx_3qqq_miss",
"miss_3_xmxmx_3qqq_miss",
"miss_3_mxxmx_3qqq_miss",
"miss_3_mxmxx_3qqq_miss",
"miss_m3_xxx_3qqq_miss",
"miss_m3_xmxmx_3qqq_miss",
"miss_m3_mxxmx_3qqq_miss",
"miss_m3_mxmxx_3qqq_miss",
"miss_4_x00_3qqq_miss",
"miss_4_0y0_3qqq_miss",
"miss_4_00z_3qqq_miss",
"miss_m4_x00_3qqq_miss",
"miss_m4_0y0_3qqq_miss",
"miss_m4_00z_3qqq_miss",
"miss_inv_000_q3qq_miss",
"miss_m_0yz_q3qq_miss",
"miss_m_x0z_q3qq_miss",
"miss_m_xy0_q3qq_miss",
"miss_m_xmxz_q3qq_miss",
"miss_m_xymy_q3qq_miss",
"miss_m_xymx_q3qq_miss",
"miss_m_xyx_q3qq_miss",
"miss_m_xxz_q3qq_miss",
"miss_m_xyy_q3qq_miss",
"miss_2_x00_q3qq_miss",
"miss_2_0y0_q3qq_miss",
"miss_2_00z_q3qq_miss",
"miss_2_xx0_q3qq_miss",
"miss_2_x0x_q3qq_miss",
"miss_2_0yy_q3qq_miss",
"miss_2_xmx0_q3qq_miss",
"miss_2_mx0x_q3qq_miss",
"miss_2_0myy_q3qq_miss",
"miss_3_xxx_q3qq_miss",
"miss_3_xmxmx_q3qq_miss",
"miss_3_mxxmx_q3qq_miss",
"miss_3_mxmxx_q3qq_miss",
"miss_m3_xxx_q3qq_miss",
"miss_m3_xmxmx_q3qq_miss",
"miss_m3_mxxmx_q3qq_miss",
"miss_m3_mxmxx_q3qq_miss",
"miss_4_x00_q3qq_miss",
"miss_4_0y0_q3qq_miss",
"miss_4_00z_q3qq_miss",
"miss_m4_x00_q3qq_miss",
"miss_m4_0y0_q3qq_miss",
"miss_m4_00z_q3qq_miss",
"miss_inv_000_qq3q_miss",
"miss_m_0yz_qq3q_miss",
"miss_m_x0z_qq3q_miss",
"miss_m_xy0_qq3q_miss",
"miss_m_xmxz_qq3q_miss",
"miss_m_xymy_qq3q_miss",
"miss_m_xymx_qq3q_miss",
"miss_m_xyx_qq3q_miss",
"miss_m_xxz_qq3q_miss",
"miss_m_xyy_qq3q_miss",
"miss_2_x00_qq3q_miss",
"miss_2_0y0_qq3q_miss",
"miss_2_00z_qq3q_miss",
"miss_2_xx0_qq3q_miss",
"miss_2_x0x_qq3q_miss",
"miss_2_0yy_qq3q_miss",
"miss_2_xmx0_qq3q_miss",
"miss_2_mx0x_qq3q_miss",
"miss_2_0myy_qq3q_miss",
"miss_3_xxx_qq3q_miss",
"miss_3_xmxmx_qq3q_miss",
"miss_3_mxxmx_qq3q_miss",
"miss_3_mxmxx_qq3q_miss",
"miss_m3_xxx_qq3q_miss",
"miss_m3_xmxmx_qq3q_miss",
"miss_m3_mxxmx_qq3q_miss",
"miss_m3_mxmxx_qq3q_miss",
"miss_4_x00_qq3q_miss",
"miss_4_0y0_qq3q_miss",
"miss_4_00z_qq3q_miss",
"miss_m4_x00_qq3q_miss",
"miss_m4_0y0_qq3q_miss",
"miss_m4_00z_qq3q_miss",
"miss_inv_000_q3q3q_miss",
"miss_m_0yz_q3q3q_miss",
"miss_m_x0z_q3q3q_miss",
"miss_m_xy0_q3q3q_miss",
"miss_m_xmxz_q3q3q_miss",
"miss_m_xymy_q3q3q_miss",
"miss_m_xymx_q3q3q_miss",
"miss_m_xyx_q3q3q_miss",
"miss_m_xxz_q3q3q_miss",
"miss_m_xyy_q3q3q_miss",
"miss_2_x00_q3q3q_miss",
"miss_2_0y0_q3q3q_miss",
"miss_2_00z_q3q3q_miss",
"miss_2_xx0_q3q3q_miss",
"miss_2_x0x_q3q3q_miss",
"miss_2_0yy_q3q3q_miss",
"miss_2_xmx0_q3q3q_miss",
"miss_2_mx0x_q3q3q_miss",
"miss_2_0myy_q3q3q_miss",
"miss_3_xxx_q3q3q_miss",
"miss_3_xmxmx_q3q3q_miss",
"miss_3_mxxmx_q3q3q_miss",
"miss_3_mxmxx_q3q3q_miss",
"miss_m3_xxx_q3q3q_miss",
"miss_m3_xmxmx_q3q3q_miss",
"miss_m3_mxxmx_q3q3q_miss",
"miss_m3_mxmxx_q3q3q_miss",
"miss_4_x00_q3q3q_miss",
"miss_4_0y0_q3q3q_miss",
"miss_4_00z_q3q3q_miss",
"miss_m4_x00_q3q3q_miss",
"miss_m4_0y0_q3q3q_miss",
"miss_m4_00z_q3q3q_miss",
"miss_inv_000_3qq3q_miss",
"miss_m_0yz_3qq3q_miss",
# -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Object-oriented filesystem path representation.
"""
import os
import sys
import errno
import base64
from os.path import isabs, exists, normpath, abspath, splitext
from os.path import basename, dirname, join as joinpath
from os import listdir, utime, stat
from stat import S_ISREG, S_ISDIR, S_IMODE, S_ISBLK, S_ISSOCK
from stat import S_IRUSR, S_IWUSR, S_IXUSR
from stat import S_IRGRP, S_IWGRP, S_IXGRP
from stat import S_IROTH, S_IWOTH, S_IXOTH
from zope.interface import Interface, Attribute, implementer
# Please keep this as light as possible on other Twisted imports; many, many
# things import this module, and it would be good if it could easily be
# modified for inclusion in the standard library. --glyph
from twisted.python.compat import comparable, cmp, unicode
from twisted.python.deprecate import deprecated
from twisted.python.runtime import platform
from incremental import Version
from twisted.python.win32 import ERROR_FILE_NOT_FOUND, ERROR_PATH_NOT_FOUND
from twisted.python.win32 import ERROR_INVALID_NAME, ERROR_DIRECTORY, O_BINARY
from twisted.python.win32 import WindowsError
from twisted.python.util import FancyEqMixin
_CREATE_FLAGS = (os.O_EXCL |
os.O_CREAT |
os.O_RDWR |
O_BINARY)
def _stub_islink(path):
"""
Always return C{False} if the operating system does not support symlinks.
@param path: A path string.
@type path: L{str}
@return: C{False}
@rtype: L{bool}
"""
return False
islink = getattr(os.path, 'islink', _stub_islink)
randomBytes = os.urandom
armor = base64.urlsafe_b64encode
class IFilePath(Interface):
"""
File path object.
A file path represents a location for a file-like-object and can be
organized into a hierarchy; a file path can have children which are
themselves file paths.
A file path has a name which uniquely identifies it in the context of its
parent (if it has one); a file path cannot have two children with the same
name. This name is referred to as the file path's "base name".
A series of such names can be used to locate nested children of a file
path; such a series is referred to as the child's "path", relative to the
parent. In this case, each name in the path is referred to as a "path
segment"; the child's base name is the segment in the path.
When representing a file path as a string, a "path separator" is used to
delimit the path segments within the string. For a file system path, that
would be C{os.sep}.
Note that the values of child names may be restricted. For example, a file
system path will not allow the use of the path separator in a name, and
certain names (e.g. C{"."} and C{".."}) may be reserved or have special
meanings.
@since: 12.1
"""
sep = Attribute("The path separator to use in string representations")
def child(name):
"""
Obtain a direct child of this file path. The child may or may not
exist.
@param name: the name of a child of this path. C{name} must be a direct
child of this path and may not contain a path separator.
@return: the child of this path with the given C{name}.
@raise InsecurePath: if C{name} describes a file path that is not a
direct child of this file path.
"""
def open(mode="r"):
"""
Opens this file path with the given mode.
@return: a file-like object.
@raise Exception: if this file path cannot be opened.
"""
def changed():
"""
Clear any cached information about the state of this path on disk.
"""
def getsize():
"""
Retrieve the size of this file in bytes.
@return: the size of the file at this file path in bytes.
@raise Exception: if the size cannot be obtained.
"""
def getModificationTime():
"""
Retrieve the time of last access from this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
def getStatusChangeTime():
"""
Retrieve the time of the last status change for this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
def getAccessTime():
"""
Retrieve the time that this file was last accessed.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
def exists():
"""
Check if this file path exists.
@return: C{True} if the file at this file path exists, C{False}
otherwise.
@rtype: L{bool}
"""
def isdir():
"""
Check if this file path refers to a directory.
@return: C{True} if the file at this file path is a directory, C{False}
otherwise.
"""
def isfile():
"""
Check if this file path refers to a regular file.
@return: C{True} if the file at this file path is a regular file,
C{False} otherwise.
"""
def children():
"""
List the children of this path object.
@return: a sequence of the children of the directory at this file path.
@raise Exception: if the file at this file path is not a directory.
"""
def basename():
"""
Retrieve the final component of the file path's path (everything
after the final path separator).
@return: the base name of this file path.
@rtype: L{str}
"""
def parent():
"""
A file path for the directory containing the file at this file path.
"""
def sibling(name):
"""
A file path for a sibling of the file at this file path, i.e. the child
of this path's parent with the given name.
@param name: the name of a sibling of this path. C{name} must be a
direct sibling of this path and may not contain a path separator.
@return: a sibling file path of this one.
"""
class InsecurePath(Exception):
"""
Error that is raised when the path provided to L{FilePath} is invalid.
"""
class LinkError(Exception):
"""
An error with symlinks - either that there are cyclical symlinks or that
symlinks are not supported on this platform.
"""
class UnlistableError(OSError):
"""
An exception which is used to distinguish between errors which mean 'this
is not a directory you can list' and other, more catastrophic errors.
This error will try to look as much like the original error as possible,
while still being catchable as an independent type.
@ivar originalException: the actual original exception instance, either an
L{OSError} or a L{WindowsError}.
"""
def __init__(self, originalException):
"""
Create an UnlistableError exception.
@param originalException: an instance of OSError.
"""
self.__dict__.update(originalException.__dict__)
self.originalException = originalException
class _WindowsUnlistableError(UnlistableError, WindowsError):
"""
This exception is raised on Windows, for compatibility with previous
releases of FilePath where unportable programs may have done "except
WindowsError:" around a call to children().
It is private because all application code may portably catch
L{UnlistableError} instead.
"""
def _secureEnoughString(path):
"""
Compute a string usable as a new, temporary filename.
@param path: The path that the new temporary filename should be able to be
concatenated with.
@return: A pseudorandom, 16 byte string for use in secure filenames.
@rtype: the type of C{path}
"""
secureishString = armor(randomBytes(16))[:16]
return _coerceToFilesystemEncoding(path, secureishString)
class AbstractFilePath(object):
"""
Abstract implementation of an L{IFilePath}; must be completed by a
subclass.
This class primarily exists to provide common implementations of certain
methods in L{IFilePath}. It is *not* a required parent class for
L{IFilePath} implementations, just a useful starting point.
"""
def getContent(self):
"""
Retrieve the contents of the file at this path.
@return: the contents of the file
@rtype: L{bytes}
"""
with self.open() as fp:
return fp.read()
def parents(self):
"""
Retrieve an iterator of all the ancestors of this path.
@return: an iterator of all the ancestors of this path, from the most
recent (its immediate parent) to the root of its filesystem.
"""
path = self
parent = path.parent()
# root.parent() == root, so this means "are we the root"
while path != parent:
yield parent
path = parent
parent = parent.parent()
def children(self):
"""
List the children of this path object.
@raise OSError: If an error occurs while listing the directory. If the
error is 'serious', meaning that the operation failed due to an access
violation, exhaustion of some kind of resource (file descriptors or
memory), OSError or a platform-specific variant will be raised.
@raise UnlistableError: If the inability to list the directory is due
to this path not existing or not being a directory, the more specific
OSError subclass L{UnlistableError} is raised instead.
@return: an iterable of all currently-existing children of this object.
"""
try:
subnames = self.listdir()
except WindowsError as winErrObj:
# Under Python 3.3 and higher on Windows, WindowsError is an
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import os
import json
from datetime import datetime
from urllib import parse
import requests
from click import ClickException
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font, PatternFill
from openpyxl.styles.colors import Color, WHITE
from openpyxl.utils import quote_sheetname
from openpyxl.worksheet.datavalidation import DataValidation
from tqdm import trange
from connect.cli.core.constants import DEFAULT_BAR_FORMAT
from connect.cli.core.http import (
format_http_status,
handle_http_error,
)
from connect.cli.plugins.product.constants import PARAM_TYPES
from connect.cli.plugins.product.utils import (
get_col_headers_by_ws_type,
get_col_limit_by_ws_type,
get_json_object_for_param,
)
from connect.client import ClientError, ConnectClient, R
def _setup_cover_sheet(ws, product, location, client, media_path):
ws.title = 'General Information'
ws.column_dimensions['A'].width = 50
ws.column_dimensions['B'].width = 180
ws.merge_cells('A1:B1')
cell = ws['A1']
cell.fill = PatternFill('solid', start_color=Color('1565C0'))
cell.font = Font(sz=24, color=WHITE)
cell.alignment = Alignment(horizontal='center', vertical='center')
cell.value = 'Product information'
for i in range(3, 13):
ws[f'A{i}'].font = Font(sz=12)
ws[f'B{i}'].font = Font(sz=12)
ws['A3'].value = 'Account ID'
ws['B3'].value = product['owner']['id']
ws['A4'].value = 'Account Name'
ws['B4'].value = product['owner']['name']
ws['A5'].value = 'Product ID'
ws['B5'].value = product['id']
ws['A6'].value = 'Product Name'
ws['B6'].value = product['name']
ws['A7'].value = 'Export datetime'
ws['B7'].value = datetime.now().isoformat()
ws['A8'].value = 'Product Category'
ws['B8'].value = product['category']['name']
ws['A9'].value = 'Product Icon file name'
ws['A9'].font = Font(sz=14)
ws['B9'].value = f'{product["id"]}.{product["icon"].split(".")[-1]}'
_dump_image(
f'{location}{product["icon"]}',
f'{product["id"]}.{product["icon"].split(".")[-1]}',
media_path,
)
ws['A10'].value = 'Product Short Description'
ws['A10'].alignment = Alignment(
horizontal='left',
vertical='top',
)
ws['B10'].value = product['short_description']
ws['B10'].alignment = Alignment(
wrap_text=True,
)
ws['A11'].value = 'Product Detailed Description'
ws['A11'].alignment = Alignment(
horizontal='left',
vertical='top',
)
ws['B11'].value = product['detailed_description']
ws['B11'].alignment = Alignment(
wrap_text=True,
)
ws['A12'].value = 'Embedding description'
ws['B12'].value = product['customer_ui_settings']['description']
ws['B12'].alignment = Alignment(
wrap_text=True,
)
ws['A13'].value = 'Embedding getting started'
ws['B13'].value = product['customer_ui_settings']['getting_started']
ws['B13'].alignment = Alignment(
wrap_text=True,
)
categories = client.categories.all()
unassignable_cat = ['Cloud Services', 'All Categories']
categories_list = [
cat['name'] for cat in categories if cat['name'] not in unassignable_cat
]
ws['AA1'].value = 'Categories'
cat_row_idx = 2
for cat in categories_list:
ws[f'AA{cat_row_idx}'].value = cat
cat_row_idx += 1
categories_validation = DataValidation(
type='list',
formula1=f'{quote_sheetname("General Information")}!$AA$2:$AA${len(categories_list)}',
allow_blank=False,
)
ws.add_data_validation(categories_validation)
categories_validation.add('B8')
def _dump_image(image_location, image_name, media_path):
image = requests.get(image_location)
if image.status_code == 200:
with open(os.path.join(media_path, image_name), 'wb') as f:
f.write(image.content)
else:
raise ClickException(f"Error obtaining image from {image_location}")
def _setup_ws_header(ws, ws_type=None): # noqa: CCR001
if not ws_type:
ws_type = 'items'
color = Color('d3d3d3')
fill = PatternFill('solid', color)
cels = ws['A1': '{cell}1'.format(
cell=get_col_limit_by_ws_type(ws_type),
)]
col_headers = get_col_headers_by_ws_type(ws_type)
for cel in cels[0]:
ws.column_dimensions[cel.column_letter].width = 25
ws.column_dimensions[cel.column_letter].auto_size = True
cel.fill = fill
cel.value = col_headers[cel.column_letter]
if ws_type == 'params' and cel.value == 'JSON Properties':
ws.column_dimensions[cel.column_letter].width = 100
elif ws_type == 'capabilities' and cel.value == 'Capability':
ws.column_dimensions[cel.column_letter].width = 50
elif ws_type == 'static_links' and cel.value == 'Url':
ws.column_dimensions[cel.column_letter].width = 100
elif ws_type == 'templates':
if cel.value == 'Content':
ws.column_dimensions[cel.column_letter].width = 100
if cel.value == 'Title':
ws.column_dimensions[cel.column_letter].width = 50
def _calculate_commitment(item):
period = item.get('period')
if not period:
return '-'
commitment = item.get('commitment')
if not commitment:
return '-'
count = commitment['count']
if count == 1:
return '-'
multiplier = commitment['multiplier']
if multiplier == 'billing_period':
if period == 'monthly':
years = count // 12
return '{quantity} year{plural}'.format(
quantity=years,
plural='s' if years > 1 else '',
)
else:
return '{years} years'.format(
years=count,
)
# One-time
return '-'
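# Editor-added illustrative sketch (not part of the original connect-cli code)
# of how _calculate_commitment maps item data to the exported column value;
# the item dicts below are hypothetical.
def _example_commitment_values():
    monthly_two_years = {
        'period': 'monthly',
        'commitment': {'count': 24, 'multiplier': 'billing_period'},
    }
    yearly_three_years = {
        'period': 'yearly',
        'commitment': {'count': 3, 'multiplier': 'billing_period'},
    }
    one_time = {'period': 'monthly', 'commitment': {'count': 1}}
    assert _calculate_commitment(monthly_two_years) == '2 years'
    assert _calculate_commitment(yearly_three_years) == '3 years'
    assert _calculate_commitment(one_time) == '-'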
def _fill_param_row(ws, row_idx, param):
ws.cell(row_idx, 1, value=param['id']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 2, value=param['name']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 3, value='-').alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 4, value=param['title']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 5, value=param['description']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 6, value=param['phase']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 7, value=param['scope']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 8, value=param['type']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 9,
value=param['constraints']['required'] if param['constraints']['required'] else '-',
).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 10,
value=param['constraints']['unique'] if param['constraints']['unique'] else '-',
).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 11,
value=param['constraints']['hidden'] if param['constraints']['hidden'] else '-',
).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 12,
value=get_json_object_for_param(param),
).alignment = Alignment(
wrap_text=True,
)
ws.cell(
row_idx, 13, value=param['events']['created']['at'],
).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 14, value=param['events'].get('updated', {}).get('at'),
).alignment = Alignment(
horizontal='left',
vertical='top',
)
def _fill_media_row(ws, row_idx, media, location, product, media_path):
ws.cell(row_idx, 1, value=media['position'])
ws.cell(row_idx, 2, value=media['id'])
ws.cell(row_idx, 3, value='-')
ws.cell(row_idx, 4, value=media['type'])
ws.cell(row_idx, 5, value=f'{media["id"]}.{media["thumbnail"].split(".")[-1]}')
_dump_image(
f'{location}{media["thumbnail"]}',
f'{media["id"]}.{media["thumbnail"].split(".")[-1]}',
media_path,
)
ws.cell(row_idx, 6, value='-' if media['type'] == 'image' else media['url'])
def _fill_template_row(ws, row_idx, template):
ws.cell(row_idx, 1, value=template['id']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 2, value=template['title']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 3, value='-').alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 4, value=template['scope']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 5, value=template['type'] if 'type' in template else 'fulfillment',
).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 6, value=template['body']).alignment = Alignment(
wrap_text=True,
)
ws.cell(row_idx, 7, value=template['events']['created']['at']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx, 8, value=template['events'].get('updated', {}).get('at'),
).alignment = Alignment(
horizontal='left',
vertical='top',
)
def _fill_action_row(ws, row_idx, action):
ws.cell(row_idx, 1, value=action['id'])
ws.cell(row_idx, 2, value=action['action'])
ws.cell(row_idx, 3, value='-')
ws.cell(row_idx, 4, value=action['name'])
ws.cell(row_idx, 5, value=action['title'])
ws.cell(row_idx, 6, value=action['description'])
ws.cell(row_idx, 7, value=action['scope'])
ws.cell(row_idx, 8, value=action['events']['created']['at'])
ws.cell(row_idx, 9, value=action['events'].get('updated', {}).get('at'))
def _fill_configuration_row(ws, row_idx, configuration, conf_id):
ws.cell(row_idx, 1, value=conf_id)
ws.cell(row_idx, 2, value=configuration['parameter']['id'])
ws.cell(row_idx, 3, value=configuration['parameter']['scope'])
ws.cell(row_idx, 4, value='-')
ws.cell(row_idx, 5, value=configuration['item']['id'] if 'item' in configuration else '-')
ws.cell(row_idx, 6, value=configuration['item']['name'] if 'item' in configuration else '-')
ws.cell(row_idx, 7, value=configuration['marketplace']['id'] if 'marketplace' in configuration else '-')
ws.cell(row_idx, 8,
value=configuration['marketplace']['name'] if 'marketplace' in configuration else '-')
if 'structured_value' in configuration:
value = configuration['structured_value']
value = json.dumps(value, indent=4, sort_keys=True)
ws.cell(row_idx, 9, value=value).alignment = Alignment(wrap_text=True)
elif 'value' in configuration:
ws.cell(row_idx, 9, value=configuration['value'])
else:
ws.cell(row_idx, 9, value='-')
def _fill_item_row(ws, row_idx, item):
ws.cell(row_idx, 1, value=item['id'])
ws.cell(row_idx, 2, value=item['mpn'])
ws.cell(row_idx, 3, value='-')
ws.cell(row_idx, 4, value=item['display_name'])
ws.cell(row_idx, 5, value=item['description'])
ws.cell(row_idx, 6, value=item['type'])
ws.cell(row_idx, 7, value=item['precision'])
ws.cell(row_idx, 8, value=item['unit']['unit'])
period = item.get('period', 'monthly')
if period.startswith('years_'):
period = f'{period.rsplit("_")[-1]} years'
ws.cell(row_idx, 9, value=period)
ws.cell(row_idx, 10, value=_calculate_commitment(item))
ws.cell(row_idx, 11, value=item['status'])
ws.cell(row_idx, 12, value=item['events']['created']['at'])
ws.cell(row_idx, 13, value=item['events'].get('updated', {}).get('at'))
def _calculate_configuration_id(configuration):
conf_id = configuration['parameter']['id']
if 'item' in configuration and 'id' in configuration['item']:
conf_id = f'{conf_id}#{configuration["item"]["id"]}'
else:
conf_id = f'{conf_id}#'
if 'marketplace' in configuration and 'id' in configuration['marketplace']:
conf_id = f'{conf_id}#{configuration["marketplace"]["id"]}'
else:
conf_id = f'{conf_id}#'
return conf_id
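# Editor-added illustrative sketch (not part of the original connect-cli code):
# the synthetic configuration id is "<parameter>#<item>#<marketplace>", with a
# segment left empty when the configuration is not item- or marketplace-specific.
# The ids below are hypothetical.
def _example_configuration_ids():
    scoped = {
        'parameter': {'id': 'PRM-0001'},
        'item': {'id': 'PRD-0001-0001'},
        'marketplace': {'id': 'MP-12345'},
    }
    product_level = {'parameter': {'id': 'PRM-0001'}}
    assert _calculate_configuration_id(scoped) == 'PRM-0001#PRD-0001-0001#MP-12345'
    assert _calculate_configuration_id(product_level) == 'PRM-0001##'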
def _dump_actions(ws, client, product_id, silent):
_setup_ws_header(ws, 'actions')
row_idx = 2
actions = client.products[product_id].actions.all()
count = actions.count()
action_validation = DataValidation(
type='list',
formula1='"-,create,update,delete"',
allow_blank=False,
)
scope_validation = DataValidation(
type='list',
formula1='"asset,tier1,tier2"',
allow_blank=False,
)
if count > 0:
ws.add_data_validation(action_validation)
ws.add_data_validation(scope_validation)
progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
for action in actions:
progress.set_description(f'Processing action {action["id"]}')
progress.update(1)
_fill_action_row(ws, row_idx, action)
action_validation.add(f'C{row_idx}')
scope_validation.add(f'G{row_idx}')
row_idx += 1
progress.close()
print()
def _dump_configuration(ws, client, product_id, silent):
_setup_ws_header(ws, 'configurations')
row_idx = 2
configurations = client.products[product_id].configurations.all()
count = configurations.count()
action_validation = DataValidation(
type='list',
formula1='"-,update,delete"',
allow_blank=False,
)
if count == 0:
return
ws.add_data_validation(action_validation)
progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
for configuration in configurations:
conf_id = _calculate_configuration_id(configuration)
progress.set_description(f'Processing parameter configuration {conf_id}')
progress.update(1)
_fill_configuration_row(ws, row_idx, configuration, conf_id)
action_validation.add(f'D{row_idx}')
row_idx += 1
progress.close()
print()
def _dump_parameters(ws, client, product_id, param_type, silent):
_setup_ws_header(ws, 'params')
rql = R().phase.eq(param_type)
row_idx = 2
params = client.products[product_id].parameters.filter(rql)
count = params.count()
if count == 0:
# Product without params is strange, but may exist
return
action_validation = DataValidation(
type='list',
formula1='"-,create,update,delete"',
allow_blank=False,
)
type_validation = DataValidation(
type='list',
formula1='"{params}"'.format(
params=','.join(PARAM_TYPES),
),
allow_blank=False,
)
ordering_fulfillment_scope_validation = DataValidation(
type='list',
formula1='"asset,tier1,tier2"',
allow_blank=False,
)
configuration_scope_validation = DataValidation(
type='list',
formula1='"product,marketplace,item,item_marketplace"',
allow_blank=False,
)
bool_validation = DataValidation(
type='list',
formula1='"True,-"',
allow_blank=False,
)
ws.add_data_validation(action_validation)
ws.add_data_validation(type_validation)
ws.add_data_validation(ordering_fulfillment_scope_validation)
ws.add_data_validation(configuration_scope_validation)
ws.add_data_validation(bool_validation)
progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
for param in params:
progress.set_description(f'Processing {param_type} parameter {param["id"]}')
progress.update(1)
_fill_param_row(ws, row_idx, param)
action_validation.add(f'C{row_idx}')
if param['phase'] == 'configuration':
configuration_scope_validation.add(f'G{row_idx}')
else:
ordering_fulfillment_scope_validation.add(f'G{row_idx}')
type_validation.add(f'H{row_idx}')
bool_validation.add(f'I{row_idx}')
bool_validation.add(f'J{row_idx}')
bool_validation.add(f'K{row_idx}')
row_idx += 1
progress.close()
print()
def _dump_media(ws, client, product_id, silent, media_location, media_path):
_setup_ws_header(ws, 'media')
row_idx = 2
medias = client.products[product_id].media.all()
count = medias.count()
action_validation = DataValidation(
type='list',
formula1='"-,create,update,delete"',
allow_blank=False,
)
type_validation = DataValidation(
type='list',
formula1='"image,video"',
allow_blank=False,
)
if count > 0:
ws.add_data_validation(action_validation)
ws.add_data_validation(type_validation)
progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
for media in medias:
progress.set_description(f'Processing media {media["id"]}')
progress.update(1)
_fill_media_row(ws, row_idx, media, media_location, product_id, media_path)
action_validation.add(f'C{row_idx}')
type_validation.add(f'D{row_idx}')
row_idx += 1
progress.close()
print()
def _dump_external_static_links(ws, product, silent):
_setup_ws_header(ws, 'static_links')
row_idx = 2
count = len(product['customer_ui_settings']['download_links'])
count = count + len(product['customer_ui_settings']['documents'])
action_validation = DataValidation(
type='list',
formula1='"-,create,delete"',
allow_blank=False,
)
link_type = DataValidation(
type='list',
formula1='"Download,Documentation"',
allow_blank=False,
)
if count > 0:
ws.add_data_validation(action_validation)
ws.add_data_validation(link_type)
progress = trange(0, count, disable=silent, leave=True, bar_format=DEFAULT_BAR_FORMAT)
progress.set_description("Processing static links")
for link in product['customer_ui_settings']['download_links']:
progress.update(1)
ws.cell(row_idx, 1, value='Download')
ws.cell(row_idx, 2, | |
types
plot_type = type
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
if plot_type in ("line", "scatter", "polygon", "bar", "filled_line",
"segment"):
# Tie data to the index range
if len(data) == 1:
if self.default_index is None:
# Create the default index based on the length of the first
# data series
value = self._get_or_create_datasource(data[0])
self.default_index = ArrayDataSource(arange(len(value.get_data())),
sort_order="none")
self.index_range.add(self.default_index)
index = self.default_index
else:
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
self.index_range.add(index)
data = data[1:]
# Tie data to the value_range and create the renderer for each data
new_plots = []
simple_plot_types = ("line", "scatter", "segment")
for value_name in data:
value = self._get_or_create_datasource(value_name)
self.value_range.add(value)
if plot_type in simple_plot_types:
cls = self.renderer_map[plot_type]
# handle auto-coloring request
if styles.get("color") == "auto":
self._auto_color_idx = \
(self._auto_color_idx + 1) % len(self.auto_colors)
styles["color"] = self.auto_colors[self._auto_color_idx]
elif plot_type in ("polygon", "filled_line"):
cls = self.renderer_map[plot_type]
# handle auto-coloring request
if styles.get("edge_color") == "auto":
self._auto_edge_color_idx = \
(self._auto_edge_color_idx + 1) % len(self.auto_colors)
styles["edge_color"] = self.auto_colors[self._auto_edge_color_idx]
if styles.get("face_color") == "auto":
self._auto_face_color_idx = \
(self._auto_face_color_idx + 1) % len(self.auto_colors)
styles["face_color"] = self.auto_colors[self._auto_face_color_idx]
elif plot_type == 'bar':
cls = self.renderer_map[plot_type]
# handle auto-coloring request
if styles.get("color") == "auto":
self._auto_color_idx = \
(self._auto_color_idx + 1) % len(self.auto_colors)
styles["fill_color"] = self.auto_colors[self._auto_color_idx]
else:
raise ValueError("Unhandled plot type: " + plot_type)
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
plot = cls(index=index,
value=value,
index_mapper=imap,
value_mapper=vmap,
orientation=self.orientation,
origin = origin,
**styles)
self.add(plot)
new_plots.append(plot)
if plot_type == 'bar':
# For bar plots, compute the ranges from the data to make the
# plot look clean.
def custom_index_func(data_low, data_high, margin, tight_bounds):
""" Compute custom bounds of the plot along index (in
data space).
"""
bar_width = styles.get('bar_width', cls().bar_width)
plot_low = data_low - bar_width
plot_high = data_high + bar_width
return plot_low, plot_high
if self.index_range.bounds_func is None:
self.index_range.bounds_func = custom_index_func
def custom_value_func(data_low, data_high, margin, tight_bounds):
""" Compute custom bounds of the plot along value (in
data space).
"""
plot_low = data_low - (data_high-data_low)*0.1
plot_high = data_high + (data_high-data_low)*0.1
return plot_low, plot_high
if self.value_range.bounds_func is None:
self.value_range.bounds_func = custom_value_func
self.index_range.tight_bounds = False
self.value_range.tight_bounds = False
self.index_range.refresh()
self.value_range.refresh()
self.plots[name] = new_plots
elif plot_type in ("text"):
if len(data) != 3:
raise ValueError("Text plots require (index, value, text) data")
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
self.index_range.add(index)
value = self._get_or_create_datasource(data[1])
self.value_range.add(value)
text = self._get_or_create_datasource(data[2])
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
cls = self.renderer_map[plot_type]
plot = cls(index=index,
index_mapper=imap,
value=value,
value_mapper=vmap,
text=text,
orientation=self.orientation,
origin=origin,
**styles)
self.add(plot)
self.plots[name] = [plot]
elif plot_type in ("cmap_scatter", "cmap_segment"):
if plot_type == "cmap_scatter" and len(data) != 3:
raise ValueError("Colormapped scatter plots require (index, value, color) data")
elif len(data) > 4 or len(data) < 3:
raise ValueError("Colormapped segment plots require (index, value, color) or (index, value, color, width) data")
index = self._get_or_create_datasource(data[0])
if self.default_index is None:
self.default_index = index
self.index_range.add(index)
value = self._get_or_create_datasource(data[1])
self.value_range.add(value)
color = self._get_or_create_datasource(data[2])
if "color_mapper" not in styles:
raise ValueError("Scalar 2D data requires a color_mapper.")
colormap = styles.pop("color_mapper")
if self.color_mapper is not None and self.color_mapper.range is not None:
color_range = self.color_mapper.range
else:
color_range = DataRange1D()
if isinstance(colormap, AbstractColormap):
self.color_mapper = colormap
if colormap.range is None:
color_range.add(color)
colormap.range = color_range
elif callable(colormap):
color_range.add(color)
self.color_mapper = colormap(color_range)
else:
raise ValueError("Unexpected colormap %r in plot()." % colormap)
if self.color_mapper is not None and self.color_mapper.range is not None:
color_range = self.color_mapper.range
else:
color_range = DataRange1D()
if len(data) == 4:
size = self._get_or_create_datasource(data[3])
size_range = DataRange1D()
size_range.add(size)
size_min = styles.pop("size_min", 1)
size_max = styles.pop("size_max", 10)
sizemap = LinearMapper(low_pos=size_min, high_pos=size_max,
range=size_range)
if self.index_scale == "linear":
imap = LinearMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
else:
imap = LogMapper(range=self.index_range,
stretch_data=self.index_mapper.stretch_data)
if self.value_scale == "linear":
vmap = LinearMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
else:
vmap = LogMapper(range=self.value_range,
stretch_data=self.value_mapper.stretch_data)
cls = self.renderer_map[plot_type]
plot = cls(index=index,
index_mapper=imap,
value=value,
value_mapper=vmap,
color_data=color,
color_mapper=self.color_mapper,
orientation=self.orientation,
origin=origin,
**styles)
if len(data) == 4:
plot.width_data = size
plot.width_mapper = sizemap
plot.width_by_data = True
self.add(plot)
self.plots[name] = [plot]
else:
raise ValueError("Unknown plot type: " + plot_type)
return self.plots[name]
def img_plot(self, data, name=None, colormap=None,
xbounds=None, ybounds=None, origin=None, hide_grids=True,
**styles):
""" Adds image plots to this Plot object.
If *data* has shape (N, M, 3) or (N, M, 4), then it is treated as RGB or
RGBA (respectively) and *colormap* is ignored.
If *data* is an array of floating-point data, then a colormap can
be provided via the *colormap* argument, or the default of 'Spectral'
will be used.
*Data* should be in row-major order, so that xbounds corresponds to
*data*'s second axis, and ybounds corresponds to the first axis.
Parameters
----------
data : string
The name of the data array in self.plot_data
name : string
The name of the plot; if omitted, then a name is generated.
xbounds, ybounds : string, tuple, or ndarray
Bounds where this image resides. Bounds may be: a) names of
data in the plot data; b) tuples of (low, high) in data space,
c) 1D arrays of values representing the pixel boundaries (must
be 1 element larger than underlying data), or
d) 2D arrays as obtained from a meshgrid operation
origin : string
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
hide_grids : bool, default True
Whether or not to automatically hide the grid lines on the plot
styles : series of keyword arguments
Attributes and values that apply to one or more of the
plot types requested, e.g.,'line_color' or 'line_width'.
"""
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
value = self._get_or_create_datasource(data)
array_data = value.get_data()
if len(array_data.shape) == 3:
if array_data.shape[2] not in (3,4):
raise ValueError("Image plots require color depth of 3 or 4.")
cls = self.renderer_map["img_plot"]
kwargs = dict(**styles)
else:
if colormap is None:
if self.color_mapper is None:
colormap = Spectral(DataRange1D(value))
else:
colormap = self.color_mapper
elif isinstance(colormap, AbstractColormap):
if colormap.range is None:
colormap.range = DataRange1D(value)
else:
colormap = colormap(DataRange1D(value))
self.color_mapper = colormap
cls = self.renderer_map["cmap_img_plot"]
kwargs = dict(value_mapper=colormap, **styles)
return self._create_2d_plot(cls, name, origin, xbounds, ybounds, value,
hide_grids, cell_plot=True, **kwargs)
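# Editor-added illustrative sketch, not part of the original Chaco source:
# a scalar 2D array registered in the plot data can be rendered as a
# colormapped image. The data names and bounds below are hypothetical, and
# ArrayPlotData is assumed to be the usual chaco.api container.
#
#   pd = ArrayPlotData(imagedata=my_2d_array)
#   plot = Plot(pd)
#   plot.img_plot("imagedata", xbounds=(0.0, 10.0), ybounds=(0.0, 5.0),
#                 colormap=Spectral)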
def contour_plot(self, data, type="line", name=None, poly_cmap=None,
xbounds=None, ybounds=None, origin=None, hide_grids=True, **styles):
""" Adds contour plots to this Plot object.
Parameters
----------
data : string
The name of the data array in self.plot_data, which must be
floating point data.
type : comma-delimited string of "line", "poly"
The type of contour plot to add. If the value is "poly"
and no colormap is provided via the *poly_cmap* argument, then
a default colormap of 'Spectral' is used.
name : string
The name of the plot; if omitted, then a name is generated.
poly_cmap : string
The name of the color-map function to call (in
chaco.default_colormaps) or an AbstractColormap instance
to use for contour poly plots (ignored for contour line plots)
xbounds, ybounds : string, tuple, or ndarray
Bounds where this image resides. Bounds may be: a) names of
data in the plot data; b) tuples of (low, high) in data space,
c) 1D arrays of values representing the pixel boundaries (must
be 1 element larger than underlying data), or
d) 2D arrays as obtained from a meshgrid operation
origin : string
Which corner the origin of this plot should occupy:
"bottom left", "top left", "bottom right", "top right"
hide_grids : bool, default True
Whether or not to automatically hide the grid lines on the plot
styles : series of keyword arguments
Attributes and values that apply to one or more of the
plot types requested, e.g.,'line_color' or 'line_width'.
"""
if name is None:
name = self._make_new_plot_name()
if origin is None:
origin = self.default_origin
value = self._get_or_create_datasource(data)
if value.value_depth != 1:
raise ValueError("Contour plots require 2D scalar field")
if type == "line":
cls = self.renderer_map["contour_line_plot"]
kwargs = dict(**styles)
# if | |
# Source: supriyasingh01/github_basics, Internetworking Distributed Project/finalProject/ovs/pox-master/pox/lib/recoco/recoco.py
# Copyright 2011 <NAME>
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from collections import deque
from Queue import PriorityQueue
from Queue import Queue
import time
import threading
from threading import Thread
import select
import traceback
import os
import socket
import pox.lib.util
import random
from pox.lib.epoll_select import EpollSelect
CYCLE_MAXIMUM = 2
# A ReturnFunction can return this to skip a scheduled slice at the last
# moment.
ABORT = object()
defaultScheduler = None
nextTaskID = 0
def generateTaskID ():
global nextTaskID
nextTaskID += 1
return nextTaskID
class BaseTask (object):
id = None
#running = False
priority = 1
@classmethod
def new (cls, *args, **kw):
"""
Creates a task and starts it on the default scheduler with the
default priority.
"""
o = cls(*args, **kw)
o.start(fast=True)
return o
def __init__ (self, *args, **kw):
#NOTE: keep in sync with Task.__init__ !
# (better yet, refactor)
self.id = generateTaskID()
self.gen = self.run(*args, **kw)
self.rv = None
self.rf = None # ReturnFunc
def start (self, scheduler = None, priority = None, fast = False):
"""
Schedules this task.
See Scheduler.schedule() and Scheduler.fast_schedule() for the meaning
of the 'fast' argument.
"""
if scheduler is None: scheduler = defaultScheduler
if priority != None: self.priority = priority
if fast:
scheduler.fast_schedule(self)
else:
scheduler.schedule(self)
def execute (self):
if self.rf is not None:
v = self.rf(self)
self.rf = None
self.rv = None
if v == ABORT:
return False
else:
v = self.rv
self.rv = None
return self.gen.send(v)
def run (self):
print("Dummy task")
yield 0
class Task (BaseTask):
"""
Provides an interface close to threading.Thread
"""
def __init__ (self, group=None, target=None, name=None, args=(), kwargs={}):
#NOTE: keep in sync with BaseTask.__init__ !
# (better yet, refactor)
assert(group == None) # Not supported
self.id = generateTaskID()
self.rv = None
self.name = name
if name == None: self.name = str(self.id)
self.target = target
self.args = args
self.kwargs = kwargs
self.gen = self.run(*args, **kwargs)
BaseTask.__init__(self)
def run (self):
g = self.target(*self.args, **self.kwargs)
g.next()
while True:
g.send((yield))
def __str__ (self):
return "<" + self.__class__.__name__ + "/tid" + str(self.name) + ">"
class Scheduler (object):
""" Scheduler for Tasks """
def __init__ (self, isDefaultScheduler = None, startInThread = True,
daemon = False, useEpoll=False):
self._ready = deque()
self._hasQuit = False
self._selectHub = SelectHub(self, useEpoll=useEpoll)
self._thread = None
self._event = threading.Event()
self._lock = threading.Lock()
self._callLaterTask = None
self._allDone = False
global defaultScheduler
if isDefaultScheduler or (isDefaultScheduler is None and
defaultScheduler is None):
defaultScheduler = self
if startInThread:
self.runThreaded(daemon)
def __del__ (self):
self._hasQuit = True
super(Scheduler, self).__del__()
def callLater (self, func, *args, **kw):
"""
Calls func with the given arguments at some later point, within this
scheduler. This is a good way for another thread to call something in
a co-op-thread-safe manner.
"""
with self._lock:
if self._callLaterTask is None:
self._callLaterTask = CallLaterTask()
self._callLaterTask.start()
self._callLaterTask.callLater(func, *args, **kw)
def runThreaded (self, daemon = False):
self._thread = Thread(target = self.run)
self._thread.daemon = daemon
self._thread.start()
def synchronized (self):
return Synchronizer(self)
def schedule (self, task, first = False):
"""
Schedule the given task to run later.
If first is True, the task will be the next to run.
Unlike fast_schedule(), this method will not schedule a task to run
multiple times. The one exception is if a Task actually schedules
itself. The easiest way to avoid this is simply not to do it.
See fast_schedule() and ScheduleTask for more info.
"""
if threading.current_thread() is self._thread:
# We know we're good.
#TODO: Refactor the following with ScheduleTask
if task in self._ready:
# It might make sense to keep a flag on the task, since checking
# if it's in the ready list is not very efficient.
# Not sure if it makes sense to print out a message here or not.
import logging
logging.getLogger("recoco").info("Task %s scheduled multiple " +
"times", task)
return False
self.fast_schedule(task, first)
return True
st = ScheduleTask(self, task)
st.start(fast=True)
def fast_schedule (self, task, first = False):
"""
Schedule the given task to run later.
If first is True, the task will be the next to run.
This method does not protect you from scheduling the same Task more
than once, which you probably really don't want to do.
If you are scheduling an existing Task (waking it) from another Task,
you should either implement your own logic to ensure that you don't
schedule it multiple times, or you should just use schedule().
If you are scheduling an existing Task (waking it) from any thread
besides the one the scheduler is running on, there's a race condition
which makes it nontrivial to ensure that multiple schedulings never
happen, and you should just use schedule() for such Tasks.
If you are scheduling a new Task that you just created, this method
is always safe.
"""
# Sanity check. Won't catch all cases.
assert task not in self._ready
if first:
self._ready.appendleft(task)
else:
self._ready.append(task)
self._event.set()
def quit (self):
self._hasQuit = True
def run (self):
try:
while self._hasQuit == False:
if len(self._ready) == 0:
self._event.wait(CYCLE_MAXIMUM) # Wait for a while
self._event.clear()
if self._hasQuit: break
r = self.cycle()
finally:
#print("Scheduler done")
self._hasQuit = True
self._selectHub._cycle()
self._allDone = True
def cycle (self):
#if len(self._ready) == 0: return False
# Patented hilarious priority system
#TODO: Replace it with something better
t = None
try:
while True:
t = self._ready.popleft()
if t.priority >= 1: break
if random.random() >= t.priority: break
if len(self._ready) == 1: break
self._ready.append(t)
except IndexError:
return False
#print(len(self._ready), "tasks")
try:
rv = t.execute()
except StopIteration:
return True
except:
try:
print("Task", t, "caused exception and was de-scheduled")
traceback.print_exc()
except:
pass
return True
if isinstance(rv, BlockingOperation):
try:
rv.execute(t, self)
except:
print("Task", t, "caused exception during a blocking operation and " +
"was de-scheduled")
traceback.print_exc()
elif rv is False:
# Just unschedule/sleep
#print "Unschedule", t, rv
pass
elif type(rv) == int or type(rv) == long or type(rv) == float:
# Sleep time
if rv == 0:
#print "sleep 0"
self._ready.append(t)
else:
self._selectHub.registerTimer(t, rv)
elif rv == None:
raise RuntimeError("Must yield a value!")
return True
#TODO: Read() and Write() BlockingOperations that use nonblocking sockets with
# SelectHub and do post-processing of the return value.
class BlockingOperation (object):
"""
A base class for what can be thought of as syscalls for Tasks.
The separation between __init__ and execute may seem sort of artificial, but
it serves an actual purpose, which is that it makes it impossible for a task
to accidentally start to make a syscall (by instantiating a BlockingOperation)
without actually yielding.
"""
def __init__ (self):
""" When the syscall is made by a task, this is executed """
pass
def execute (self, task, scheduler):
""" Scheduler calls this to actually execute the syscall """
pass
class CallBlocking (BlockingOperation):
"""
Syscall that calls an actual blocking operation (like a real .recv()).
In order to keep from blocking, it calls it on another thread.
The return value is (ret_val, exc_info), one of which is always None.
"""
@classmethod
def new (_cls, _func, *_args, **_kw):
return _cls(_func, *_args, **_kw)
def __init__ (self, func, args=(), kw={}):
self.t = None
self.scheduler = None
self.task = None
self.func = func
self.args = args
self.kw = kw
def _proc (self):
try:
self.task.rv = (self.func(*self.args, **self.kw), None)
except:
import sys
self.task.rv = (None, sys.exc_info())
self.scheduler.fast_schedule(self.task)
def execute (self, task, scheduler):
self.task = task
self.scheduler = scheduler
#NOTE: It might be nice to use a pool here
self.t = threading.Thread(target=self._proc)
#pool.add(self._proc)
self.t.daemon = True
self.t.start()
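class _ExampleResolverTask (BaseTask):
  """
  Editor-added illustrative sketch, not part of the original POX source:
  CallBlocking runs a real blocking call on a helper thread and resumes the
  task with (ret_val, exc_info), one of which is always None.
  """
  def run (self):
    rv, exc_info = yield CallBlocking(socket.gethostbyname, ("www.example.com",))
    if exc_info is None:
      print("resolved to", rv)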
class Exit (BlockingOperation):
"""
Syscall that | |
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import umap
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from data.data_loader import get_data_ts_onh_mac
from losses import elbo_general_timeseries
from losses import mae_globalmean, mae_loss
from models.multiodal_latentodegru_pretrain import MultimodalLatentODE
from utils.utils import save_model, RunningAverageMeter, select_modalities, subsample
# from ode.viz_latent import latent_viz, latent_viz_random
adjoint = False
if adjoint:
from torchdiffeq import odeint_adjoint as odeint
else:
from torchdiffeq import odeint
print("CCC GPU:", os.environ.get('CUDA_VISIBLE_DEVICES'))
GPU = 0
device = torch.device('cuda:' + str(GPU)
if torch.cuda.is_available() else 'cpu')
print('Device:', device.type)
print('HHYU torch.cuda.device:', torch.cuda.device(GPU))
if device.type != 'cpu':
print('HHYU torch.cuda.get_device_name:', torch.cuda.get_device_name(GPU))
BATCH_SIZE = 32
image_dim = 64
number_pixel_out = image_dim * image_dim * 1
one_ = torch.tensor(1.0).to(device)
zero_ = torch.tensor(0.0).to(device)
def create_vft_mask(vft_imgs):
N, t, c, H, W = vft_imgs.shape
temp = torch.sum(vft_imgs, dim=[0, 1, 2], keepdim=True)
temp = torch.where(temp > 0, one_, zero_)
temp = temp.repeat((N, t, c, 1, 1))
return temp
def evaluate_reconstruction_error(config, data, mode='rec', nv_fc=-1):
"""
Compute the reconstruction from different modality inputs
:param config:
:param data:
:return:
"""
assert mode in ['rec', 'forecast'], 'expected one of [rec, forecast]'
if (mode == 'forecast'):
assert nv_fc > 0, 'number of visits to be used as forecasting input should be provided'
model = config.model.eval()
comb = [[1, 1, 1], [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
comb = select_modalities(comb, config.MODALITIES)
with torch.no_grad():
ds_val_batches, ts_val, val_dx = data.get_val(dx_filter=None)
rnfl_xi_val = ds_val_batches[0]
gcl_xi_val = ds_val_batches[1]
#vft_xi_val = ds_val_batches[2]
def evaluate_rec_error(inputs, modalities):
inputs_ = subsample(inputs, modalities)
outlist, mulogvar = model(ts=ts_val, x_list=inputs_)
[pred_rnfl, pred_gcl, pred_vft] = map(lambda x: x if x is None else torch.sigmoid(x), outlist)
NoneError = torch.from_numpy(np.asanyarray([-100]))
reshape = lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2], x.shape[3], x.shape[4]))
#mae_globalmean
rnfl_xi_val, gcl_xi_val, vft_xi_val = [inp[:, :nv_fc, :, :, :] if inp is not None else None for inp in ds_val_batches]
vft_mask = create_vft_mask(vft_xi_val)
error0 = torch.mean(
mae_loss(reshape(rnfl_xi_val[:, :]) * 200,
reshape(pred_rnfl[:, :]) * 200)) if pred_rnfl is not None else NoneError
error1 = torch.mean(mae_loss(gcl_xi_val * 200, pred_gcl * 200)) if pred_gcl is not None else NoneError
error2 = torch.mean(mae_loss(reshape(vft_xi_val[:, :]) * 40, reshape(pred_vft[:, :]) * 40,
mask=reshape(vft_mask[:, :]))) if pred_vft is not None else NoneError
return [error0, error1, error2], [pred_rnfl, pred_gcl, pred_vft]
masks = [None, None, None]
errors = [];
inputs_modalities = []
preds_all = []
inputs_all=[]
# if (config.MODALITIES[1] == 1): # if RNFL is used in training
for c in comb:
x_list_c = subsample(ds_val_batches, c)
if(mode=='rec'):
error, preds = evaluate_rec_error(x_list_c, c)
else:
error, preds = evaluate_forecast_error(model,ts_val,x_list_c,masks,c,nv_fc)
error = [e.cpu().numpy() for e in error]
error = [float(e) for e in error]
error = subsample(error, config.MODALITIES)
preds = subsample(preds, config.MODALITIES)
preds_all.append(preds)
inputs_modalities.append(c)
errors.append(error)
inputs_all.append(x_list_c)
return errors, inputs_modalities, preds_all, inputs_all
def plot_z(mu, val_dx, save_path):
labels = {2: 'Glaucoma', 1: 'GS', 0: 'Normal'}
replace = lambda x: labels[x]
groups = np.array(list(map(replace, val_dx)))
cdict = {'Glaucoma': 'red', 'Normal': 'green', 'GS': 'yellow'}
fig, ax = plt.subplots()
for g in np.unique(groups):
ix = np.where(groups == g)
ax.set_xlim([-5, 5])
ax.set_ylim([-5, 5])
ax.scatter(mu[ix, 0], mu[ix, 1], color="None", edgecolors=cdict[g], linewidth=2, label=g, s=100)
ax.legend()
plt.savefig(save_path)
plt.close()
def plot_zembedded(mu, val_dx, save_path, type='tsne'):
assert type in ['tsne', 'umap'], 'type can be one of umap or tsne'
labels = {2: 'Glaucoma', 1: 'GS', 0: 'Normal'}
if (type == 'tsne'):
mu = TSNE(n_components=2, perplexity=30).fit_transform(mu)
else:
mu = umap.UMAP(n_neighbors=10,
min_dist=0.1, n_components=2,
metric='euclidean').fit_transform(mu)
replace = lambda x: labels[x]
groups = np.array(list(map(replace, val_dx)))
cdict = {'Glaucoma': 'red', 'Normal': 'green', 'GS': 'yellow'}
fig, ax = plt.subplots()
for g in np.unique(groups):
ix = np.where(groups == g)
ax.scatter(mu[ix, 0], mu[ix, 1], color="None", edgecolors=cdict[g], linewidth=2, label=g, s=100)
ax.legend()
plt.savefig(save_path)
plt.close()
def subsampled_elbo_losses(model, t_ts, inputs, modality_flags, annealing_factor=1.,
batch_size=None):
"""
:param model:
:param t_ts:
:param inputs: list of input modalities; see config and data loader for the order
:param modality_flags: array of binary flags, e.g. [1, 1, 0]; a 0 entry denotes a modality that is not used
:param annealing_factor:
:param batch_size:
:return:
"""
assert len(inputs) == 3, 'size of input list should be 3'
comb = [[1, 1, 1], [1, 1, 0], [0, 1, 1], [1, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
# comb = [[1, 1, 0], [0, 1, 0], [1, 0, 0] ]
comb = select_modalities(comb, modality_flags)
# print("subsampled_elbo_losses, comb =", comb)
assert len(comb) > 0, 'no modality is selected'
loss = 0
KLD = 0
for c in comb:
# print('LOss for ', str(c))
assert len(c) == len(modality_flags), 'modality_flags array size should match combintation array'
# c = [a * b for a, b in zip(c, modality_flags)]
input_xi_sub = subsample(inputs, c)
pred_list, [mu, logvar] = model(t_ts, input_xi_sub)
# if the loss for gcl or vft is not required, set the corresponding index in pred_list and inputs_ to None before passing below
pred_list_ = subsample(pred_list, c)
inputs_ = subsample(inputs, c)
# pred_list[2] = None
# inputs_ts[2] = None
# pred_list[1] = None
# inputs_ts[1] = None
inputs_ = [inp[:, :1, :, :, :] if inp is not None else None for inp in inputs_]
loss_, KLD_ = elbo_general_timeseries(preds=pred_list_, gts=inputs_, mu=mu, logvar=logvar,
annealing_factor=annealing_factor, loss_type='ce')
loss += loss_
KLD += KLD_
return loss, KLD
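# Editor-added illustrative sketch (not part of the original training code):
# with inputs = [rnfl, gcl, vft], each of shape (N, t, c, H, W) or None, and
# modality_flags = [1, 1, 0], only modality subsets drawn from the first two
# inputs are enumerated for the multimodal ELBO, e.g.
#   loss, kld = subsampled_elbo_losses(model, ts, [rnfl, gcl, vft], [1, 1, 0],
#                                      annealing_factor=0.5)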
class VAEData:
def get_train_minibatch(self, batch_size):
raise NotImplementedError('implementation required')
def get_val(self):
raise NotImplementedError('implementation required')
def size_train(self):
raise NotImplementedError('implementation required')
def size_val(self):
raise NotImplementedError('implementation required')
from data.utils import resize_stack
def process_maps(train, val, test):
train = np.vstack([train, test])
N, t, H, W, c = train.shape
train = train.transpose([0, 1, 4, 2, 3]) # (N,t, c, H,W)
N, t, H, W, c = val.shape
val = val.transpose([0, 1, 4, 2, 3]) # (N,t,c,H,W)
train = torch.from_numpy(train).float().to(device)
val = torch.from_numpy(val).float().to(device)
return train, val
def process_labels(train, val, test):
train = np.vstack([train, test])
train = torch.from_numpy(train).float().to(device)
val = torch.from_numpy(val).float().to(device)
return train, val
from cutils.common import normalize_range
#import kornia as K
class MultimodalTimeSeriesData(VAEData):
def __init__(self, fold_seed=4):
train, val, test = get_data_ts_onh_mac(mask_onhrnfl_disc=True, fold_seed=fold_seed)
rnfls_onh = train[0][0], val[0][0], test[0][0]
rnfls_onh= [resize_stack(d, (32, 32)) for d in rnfls_onh]
self.train_rnflonh, self.val_rnflonh = process_maps(rnfls_onh[0], rnfls_onh[1],rnfls_onh[2])
# self.train_gclmac, self.val_gclmac = process_maps(train[0][3], val[0][3], test[0][3])
self.train_rnflmac, self.val_rnflmac = process_maps(train[0][2], val[0][2], test[0][2])
self.train_vft, self.val_vft = process_maps(train[0][4], val[0][4], test[0][4])
assert self.train_rnflonh.shape[0] == self.train_rnflmac.shape[0], ' Number of maps should be same '
self.train_dx, self.val_dx = process_labels(train[2][0], val[2][0], test[2][0])
self.age_at_vd_train, self.age_at_vd_val = process_labels(train[1], val[1], test[1])
# to years
self.age_at_vd_train = self.age_at_vd_train / 12.0
self.age_at_vd_val = self.age_at_vd_val / 12.0
# transform in [-1, 1]
self.age_at_vd_train = normalize_range(self.age_at_vd_train, [20, 80], [-1, 1])
self.age_at_vd_val = normalize_range(self.age_at_vd_val, [20, 80], [-1, 1])
#self.rotate = K.augmentation.RandomRotation(4, same_on_batch=True)
def augment_rot(self, amap):
#apply same rotation for a sample for all the time point
temp = [ self.rotate (d) for d in amap]
return torch.stack(temp)
def get_train_minibatch(self, batch_size):
"""
:param dx_filter:
:return: maps, each of size (batch_size, t, c, H, W), and ts of size (batch_size, t)
"""
# idx = np.random.permutation(len(self.train))
idx = torch.randperm(self.train_rnflonh.shape[0], device=device)
aug_rnfl_batch = self.train_rnflonh[idx[:batch_size]] #self.augment_rot(self.train_rnflonh[idx[:batch_size]])
maps = aug_rnfl_batch, self.train_rnflmac[idx[:batch_size]], self.train_vft[
idx[:batch_size]]
ts = self.age_at_vd_train[idx[:batch_size]]
return maps, ts
def get_val(self, dx_filter=None):
"""
:param dx_filter:
:return: maps, each of size (N, t, c, H, W), and dx of size (N,)
"""
maps = [self.val_rnflonh, self.val_rnflmac, self.val_vft]
# reduce from N,t to N,1 ie one diagnosis for time sample
ts = self.age_at_vd_val
dx = torch.max(self.val_dx, dim=1)[0] # (N,1)
if (dx_filter):
maps = [m[dx == dx_filter] for m in maps]
ts = ts[dx == dx_filter]
dx = dx[dx == dx_filter]
return maps, ts, dx
def size_train(self):
return self.train_rnflonh.shape[0]
def size_val(self):
return self.val_rnflonh.shape[0]
def plot_losses_(train_loss, val_loss, save_path):
fig, ax = plt.subplots()
ax.plot(train_loss, label='train loss')
ax.plot(val_loss, label='val loss')
ax.legend()
plt.savefig(save_path)
plt.close()
def save_losses(config):
df = pd.DataFrame(list(zip(config.loss_meter.loss_history, config.kl_loss_meter.loss_history,
config.loss_meter_test.loss_history,
config.kl_loss_meter_test.loss_history)),
columns=['trainloss_elbo', 'trainloss_kl', 'val_loss_elbo', 'val_loss_kl'])
df.to_csv(os.path.join(config.RESULT_DIR, 'losses.csv'))
class Config:
def create_model(self, load_weights=False):
raise NotImplementedError('implementation required')
def plot_losses(self):
train_loss = self.loss_meter.loss_history
val_loss = self.loss_meter_test.loss_history
plot_losses_(train_loss, val_loss, save_path=os.path.join(self.RESULT_DIR, 'lossplot.jpeg'))
train_loss_kld = self.kl_loss_meter.loss_history
val_loss_kld = self.kl_loss_meter_test.loss_history
plot_losses_(train_loss_kld, val_loss_kld, save_path=os.path.join(self.RESULT_DIR, 'lossplot_kld.jpeg'))
from train_multimodalvae import getConfig as getConfigMVAE
def getConfig(modalities_, expert_, latent_dim_, fold_seed_=4):
class MyConfig(Config):
EPOCHS = 150
learning_rate = 0.001
annealing_epochs = 10
latent_dim = latent_dim_
image_dim = 64
vfim_dim = 32
_suffix = '_foldseed' + str(fold_seed_) if fold_seed_ != 4 else ''
LOG_ROOT_DIR = 'pretrain_temp_mlode_fold2' + str(latent_dim)+_suffix
MODALITIES = modalities_ # [0, 0, 1]
expert = expert_ # 'moe'
ode_method = 'euler'
modalities_str = ''.join([str(xi) for xi in MODALITIES])
prefix = 'multimoda_latentode' + | |
# Source: civicboom/civicboom, src/civicboom/controllers/settings.py
"""
REST Settings controller
Settings are broken into named groups
These groups of settings can be viewed/edited using standard REST calls
The principle is that each setting has a validator associated with it
Only the form fields that are sent are validated and saved
(this is an issue for related fields such as password, which creates a bit of a mess; see code below)
"""
from civicboom.lib.base import *
from civicboom.model import User
from civicboom.model.member import group_member_roles, group_join_mode, group_member_visibility, group_content_visibility
from civicboom.lib.constants import setting_titles
import copy
import formencode
import re
from civicboom.lib.avatar import process_avatar
from civicboom.lib.communication.messages import generators
from civicboom.lib.form_validators.validator_factory import build_schema
from civicboom.lib.form_validators.dict_overlay import validate_dict
from civicboom.lib.accounts import set_password, send_verifiy_email
from civicboom.model.meta import location_to_string
from civicboom.lib.web import _find_template_basic
from civicboom.controllers.account import AccountController
account_controller = AccountController()
from cbutils.misc import timedelta_to_str
import datetime
log = logging.getLogger(__name__)
# This also appears in Group Controller
class PrivateGroupValidator(formencode.validators.FancyValidator):
messages = {
'invalid' : x_('Value must be one of: public; private'),
'require_upgrade' : x_('You require a paid account to use this feature, please contact us!'),
}
def _to_python(self, value, state):
if value not in ['public', 'private']:
raise formencode.Invalid(self.message('invalid', state), value, state)
if value == "private" and not c.logged_in_persona.has_account_required('plus'):
raise formencode.Invalid(self.message('require_upgrade', state), value, state)
return value
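# Illustrative sketch (hypothetical values, not from the original source): like any
# formencode FancyValidator, this is driven through to_python(); note that
# c.logged_in_persona must be populated by the request context for the 'private' branch.
#   PrivateGroupValidator().to_python('public', None)    # -> 'public'
#   PrivateGroupValidator().to_python('secret', None)    # raises formencode.Invalid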
#---------------------------------------------------------------------------
# Setting Units
#---------------------------------------------------------------------------
# Define settings groups, default values and display text
settings_base = {}
def add_setting(name, description, value='', group=None, **kwargs):
setting = dict(name=name, description=description, value=value, group=group, **kwargs)
settings_base[setting['name']]=setting
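# For illustration (not part of the original source): a call such as
#   add_setting('name', _('Display name'), group='general/general', weight=0, type='string')
# leaves settings_base['name'] roughly as
#   {'name': 'name', 'description': 'Display name', 'value': '',
#    'group': 'general/general', 'weight': 0, 'type': 'string'}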
add_setting('name' , _('Display name' ) , group='general/general' , weight=0 , type='string' )
add_setting('username' , _('Username' ) , group='general/general' , weight=1 , type='display' , who='user' )
add_setting('description' , _('Description' ) , group='general/general' , weight=2 , type='longstring' , info=_('Tell the world about you and your interests.') )
add_setting('default_role' , _('Default Role') , group='general/group' , weight=3 , type='enum' , who='group' , value=group_member_roles.enums )
add_setting('join_mode' , _('Join Mode') , group='general/group' , weight=4 , type='enum' , who='group' , value=group_join_mode.enums )
add_setting('member_visibility' , _('Member Visibility') , group='general/group' , weight=5 , type='enum' , who='group' , value=group_member_visibility.enums )
add_setting('default_content_visibility', _('Default Content Visibility'), group='general/group' , weight=6 , type='enum' , who='group' , value=group_content_visibility.enums )
add_setting('website' , _('Website' ) , group='general/contact' , weight=7 , type='url' , info=_('Optional: add your website or blog etc. to your profile'))
add_setting('google_profile' , _('Google Plus or Google Profile'), group='general/contact' , weight=8 , type='url' , info=_('Add your profile so Google can link your content to it'))
add_setting('email' , _('Email Address') , group='general/contact' , weight=9 , type='email' , who='user' )
#add_setting('twitter_username' , _('Twitter username') , group='aggregation')
#add_setting('twitter_auth_key' , _('Twitter authkey' ) , group='aggregation')
#add_setting('broadcast_instant_news' , _('Twitter instant news') , group='aggregation', type='boolean')
#add_setting('broadcast_content_posts' , _('Twitter content' ) , group='aggregation', type='boolean')
add_setting('avatar' , _('Avatar' ) , group='general/avatar' , weight=10 , type='file' )
add_setting('password_current' , _('Current password') , group='password/password' , weight=100, type='password_current', who='user' )
add_setting('password_new' , _('New password') , group='password/password' , weight=101, type='password' , who='user' )
add_setting('password_new_confirm' , _('<PASSWORD>') , group='password/password' , weight=102, type='password' , who='user' )
# Ignore these message generators!
ignore_generators = ['msg_test',
'assignment_response_mobile',
'syndicate_accept',
'syndicate_decline',
'syndicate_expire',
]
i = 200
for gen in generators:
if not gen[0] in ignore_generators:
add_setting('route_'+gen[0], str(gen[2]).capitalize(), group='notifications/notifications', weight=i, type="set", value=('n','e'), default=gen[1])
i = i + 1
add_setting('no_marketing_emails' , _('Stop all _site_name marketing & update emails'), group='notifications/other', weight=i, type='boolean')
add_setting('location_home' , _('Home Location' ) , group='location/location' , weight=300, type='location' )
add_setting('location_home_name' , _('Home Location' ) , group='location/location' , weight=301, type='string_location' )
add_setting('help_popup_created_user', _('Hide the help pop-up shown upon login to the site'), group='help_adverts/help_popups', weight=400, type='boolean')
add_setting('help_popup_created_group', _('Hide the help pop-up shown upon switching to a group'), group='help_adverts/help_popups', weight=401, type='boolean')
add_setting('help_popup_created_assignment', _('Hide the help pop-up shown upon creating an assignment'), group='help_adverts/help_popups', weight=402, type='boolean')
add_setting('advert_hand_response', _('Hide the info box encouraging users to start responding to requests'), group='help_adverts/guides', weight=403, type='boolean')
add_setting('advert_hand_assignment', _('Hide the info box encouraging _groups to create _assignments'), group='help_adverts/guides', weight=404, type='boolean')
add_setting('advert_hand_widget', _('Hide the info box encouraging the use of the _widget'), group='help_adverts/guides', weight=405, type='boolean')
add_setting('advert_hand_mobile', _('Hide the info box encouraging the use of the mobile app'), group='help_adverts/guides', weight=406, type='boolean')
add_setting('advert_hand_content', _('Hide the info box encouraging the creation of content'), group='help_adverts/guides', weight=407, type='boolean')
add_setting('advert_hand_hub', _('Hide the info box encouraging the creation of hubs'), group='help_adverts/guides', weight=408, type='boolean')
add_setting('auto_follow_on_accept', _('Automatically follow the user or _group who created a request on accepting it'), group='advanced/follower_settings', weight=1000, type='boolean')
add_setting('allow_registration_follows', _('Allow this user or _group to automatically follow users when they register'), group='advanced/follower_settings', weight=1001, type='boolean', info=_('Please speak to our team before you change this option!'))
add_setting('push_assignment', _('Set a _assignment you would like followers to be able to push stories to'), group='advanced/follower_settings', weight=1002, type='id_assignment', info=_('Please speak to our team before you change this option!'))
add_setting('hide_followers', _('Do not list your followers to any members other than yourself'), group='advanced/follower_settings', weight=1003, type='boolean', info=_('Protect your brands followers by hiding them from public view'))
add_setting('summary_email_interval', _('Notification summary email interval'), group='advanced/follower_settings', weight=1004, type='interval', info=_('hours=3 or days=7'), who='user')
#---------------------------------------------------------------------------
# Setting Validators (for dynamic schema construction)
#---------------------------------------------------------------------------
# these are kept separate from the group definitions because the group definitions dict is sent to clients; we do not want to contaminate that dict
import formencode
import civicboom.lib.form_validators
import civicboom.lib.form_validators.base
import civicboom.lib.form_validators.registration
# Type validators, convert from our from type to validators
type_validators = { 'string': formencode.validators.UnicodeString(),
'longstring': civicboom.lib.form_validators.base.UnicodeStripHTMLValidator(), #formencode.validators.UnicodeString(),
'url': formencode.validators.URL(),
'email': civicboom.lib.form_validators.registration.UniqueEmailValidator(),
'password': civicboom.lib.form_validators.base.PasswordValidator(),
'password_current': civicboom.lib.form_validators.base.CurrentUserPasswordValidator(),
'file': formencode.All(formencode.validators.FieldStorageUploadConverter(), civicboom.lib.form_validators.base.FileTypeValidator(file_type_re=re.compile(r'^image/[-\w\+]+$'), messages={'invalid':"Sorry we don't support that file, please upload a JPEG, PNG or GIF image."})),
'location': civicboom.lib.form_validators.base.LocationValidator(),
'string_location': formencode.validators.UnicodeString(),
'boolean': formencode.validators.UnicodeString(max=10, strip=True),
'id_assignment': civicboom.lib.form_validators.base.ContentObjectValidator(persona_owner=True, content_type='assignment', not_empty=False),
'interval': civicboom.lib.form_validators.base.IntervalValidator(),
}
settings_validators = {}
for setting in settings_base.values():
# Special handling for sets (custom Set validator; also separated so we can handle form submissions)
if setting['type'] == 'set':
for val in setting['value']:
settings_validators[setting['name']+'-'+val] = formencode.validators.OneOf((val, ''))
settings_validators[setting['name']] = civicboom.lib.form_validators.base.SetValidator(set=setting['value']) #formencode.validators.Set(setting['value'].split(','))
# Special handling for enums
elif setting['type'] == 'enum':
# HACK ALERT: GregM: Stupid Validator needed for private content
if setting['name'] in ['default_content_visibility', 'member_visibility']:
settings_validators[setting['name']] = PrivateGroupValidator()
else:
settings_validators[setting['name']] = formencode.validators.OneOf(setting['value'])
# Anything else from default type_validators
else:
settings_validators[setting['name']] = type_validators.get(setting['type'])
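# Illustration of what the loop above produces (hypothetical setting name, not in the
# original source): for a 'set' setting such as route_msg_foo with value ('n', 'e'),
#   settings_validators['route_msg_foo-n'] = formencode.validators.OneOf(('n', ''))
#   settings_validators['route_msg_foo-e'] = formencode.validators.OneOf(('e', ''))
#   settings_validators['route_msg_foo']   = SetValidator(set=('n', 'e'))
# while an 'enum' setting such as join_mode maps to OneOf(group_join_mode.enums).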
def build_meta(user, user_type, panel):
settings_meta = dict( [ (setting['name'], setting ) for setting in copy.deepcopy(settings_base).values() if setting.get('who', user_type) == user_type and setting['group'].split('/')[0] == panel ] )
panels = dict( [
(
setting['group'].split('/')[0],
{
'panel':setting['group'].split('/')[0],
'weight':setting['weight'],
'title': setting_titles().get(setting['group'].split('/')[0]) if setting_titles().get(setting['group'].split('/')[0]) else setting['group'].split('/')[0]
}
)
for setting in settings_base.values() if setting.get('who', user_type) == user_type
] )
settings_hints = {}
# Populate settings dictionary for this user
for setting_name in settings_meta.keys():
if settings_meta[setting_name].get('who', user_type) == user_type:
if setting_name == 'email' and user.email_unverified != None:
settings_hints['email'] = _( 'You have an unverified email address. This could be for two reasons:') + '<ol>' + \
'<li>' + _('You have signed up to _site_name via Twitter, Facebook, LinkedIn, etc.') + '</li>' + \
'<li>' + _('You have changed your email address and not verified it.') + '</li>' + \
'</ol>' + _('To verify your email: please check your email account (%s) and follow the instructions.') % user.email_unverified + '<br />' + \
_('OR enter new email address above and check your email.')
#_('You have an unverified email address (%s), please check your email. If this address is incorrect please update and save, then check your email.') % user.email_unverified
if setting_name == 'password_current' and 'password_current' in settings_meta and not 'password' in [login.type for login in user.login_details]:
del settings_meta[setting_name]
settings_hints['password_new'] = _("You have signed up via Twitter, Facebook, LinkedIn, etc. In order to login to our mobile app, please create a Civicboom password here. (You will use this password and your username to login to the app.)")
if 'password' in setting_name and user.email_unverified != None:
if setting_name in settings_meta:
if not '_readonly' in settings_meta[setting_name]['type']:
settings_meta[setting_name]['type'] = settings_meta[setting_name]['type'] + '_readonly'
if not 'password' in [login.type for login in user.login_details]:
settings_hints['password_new'] = _("If you want to change your Civicboom password, please verify your email address (see General settings). You will need to verify your address and create a password to use our mobile app.")
else:
settings_hints['password_current'] = _("If you want to change your Civicboom password, please verify your email address (see General settings).")
data = dict( settings_meta=settings_meta,
settings_hints=settings_hints,
panels=panels,
panel=panel
)
return data
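# Shape of the returned dict (for illustration only, derived from the code above):
#   {'settings_meta':  {<setting name>: <setting dict>, ...},   # filtered by who/panel
#    'settings_hints': {'email': ..., 'password_new': ...},     # optional hint strings
#    'panels':         {<panel>: {'panel': ..., 'weight': ..., 'title': ...}, ...},
#    'panel':           <requested panel name>}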
def find_template(panel, user_type):
try:
# panel_user_type?
_find_template_basic(action='panel/'+panel+'_'+user_type)
template = panel+'_'+user_type
except:
try:
# panel?
_find_template_basic(action='panel/'+panel)
template = panel
except:
# default to generic
template = 'generic'
return template
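# Example (hypothetical panel name): find_template('notifications', 'group') first checks
# that 'panel/notifications_group' exists (returning 'notifications_group'), then
# 'panel/notifications' (returning 'notifications'), and finally falls back to 'generic'.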
def copy_user_settings(settings_meta, user, user_type):
settings = {}
for setting_name in settings_meta.keys():
setting_name_repl = setting_name.replace('_read_only', '')
if settings_meta[setting_name_repl].get('who', user_type) == user_type:
if hasattr(user, setting_name_repl):
v = getattr(user, setting_name_repl)
else:
v = user.config.get(setting_name_repl, settings_meta[setting_name].get('default'))
# Special case for email addresses, if the user has no email address but | |
the hash values for those files. If the
build source came in a single package such as a gzipped tarfile
(`.tar.gz`), the `FileHash` will be for the single path to that file.
Messages:
AdditionalProperty: An additional property for a FileHashesValue object.
Fields:
additionalProperties: Additional properties of type FileHashesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a FileHashesValue object.
Fields:
key: Name of the additional property.
value: A FileHashes attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('FileHashes', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
fileHashes = _messages.MessageField('FileHashesValue', 1)
resolvedRepoSource = _messages.MessageField('RepoSource', 2)
resolvedStorageSource = _messages.MessageField('StorageSource', 3)
resolvedStorageSourceManifest = _messages.MessageField('StorageSourceManifest', 4)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class StorageSource(_messages.Message):
r"""Location of the source in an archive file in Google Cloud Storage.
Fields:
bucket: Google Cloud Storage bucket containing the source (see [Bucket
Name Requirements](https://cloud.google.com/storage/docs/bucket-
naming#requirements)).
generation: Google Cloud Storage generation for the object. If the
generation is omitted, the latest generation will be used.
object: Google Cloud Storage object containing the source. This object
must be a zipped (`.zip`) or gzipped archive file (`.tar.gz`) containing
source to build.
"""
bucket = _messages.StringField(1)
generation = _messages.IntegerField(2)
object = _messages.StringField(3)
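# Minimal construction sketch (hypothetical bucket/object names), assuming the usual
# apitools keyword-argument message constructor:
#   src = StorageSource(bucket='my-build-bucket', object='source.tar.gz')
#   src.generation is None  # latest generation is used when the field is omitted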
class StorageSourceManifest(_messages.Message):
r"""Location of the source manifest in Google Cloud Storage. This feature is
in Preview; see description
[here](https://github.com/GoogleCloudPlatform/cloud-
builders/tree/master/gcs-fetcher).
Fields:
bucket: Google Cloud Storage bucket containing the source manifest (see
[Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-
naming#requirements)).
generation: Google Cloud Storage generation for the object. If the
generation is omitted, the latest generation will be used.
object: Google Cloud Storage object containing the source manifest. This
object must be a JSON file.
"""
bucket = _messages.StringField(1)
generation = _messages.IntegerField(2)
object = _messages.StringField(3)
class TimeSpan(_messages.Message):
r"""Start and end times for a build execution phase.
Fields:
endTime: End of time span.
startTime: Start of time span.
"""
endTime = _messages.StringField(1)
startTime = _messages.StringField(2)
class UpdateBitbucketServerConfigOperationMetadata(_messages.Message):
r"""Metadata for `UpdateBitbucketServerConfig` operation.
Fields:
bitbucketServerConfig: The resource name of the BitbucketServerConfig to
be updated. Format:
`projects/{project}/locations/{location}/bitbucketServerConfigs/{id}`.
completeTime: Time the operation was completed.
createTime: Time the operation was created.
"""
bitbucketServerConfig = _messages.StringField(1)
completeTime = _messages.StringField(2)
createTime = _messages.StringField(3)
class UpdateGitHubEnterpriseConfigOperationMetadata(_messages.Message):
r"""Metadata for `UpdateGitHubEnterpriseConfig` operation.
Fields:
completeTime: Time the operation was completed.
createTime: Time the operation was created.
githubEnterpriseConfig: The resource name of the GitHubEnterprise to be
updated. Format:
`projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}`.
"""
completeTime = _messages.StringField(1)
createTime = _messages.StringField(2)
githubEnterpriseConfig = _messages.StringField(3)
class UpdateWorkerPoolOperationMetadata(_messages.Message):
r"""Metadata for the `UpdateWorkerPool` operation.
Fields:
completeTime: Time the operation was completed.
createTime: Time the operation was created.
workerPool: The resource name of the `WorkerPool` being updated. Format:
`projects/{project}/locations/{location}/workerPools/{worker_pool}`.
"""
completeTime = _messages.StringField(1)
createTime = _messages.StringField(2)
workerPool = _messages.StringField(3)
class Volume(_messages.Message):
r"""Volume describes a Docker container volume which is mounted into build
steps in order to persist files across build step execution.
Fields:
name: Name of the volume to mount. Volume names must be unique per build
step and must be valid names for Docker volumes. Each named volume must
be used by at least two build steps.
path: Path at which to mount the volume. Paths must be absolute and cannot
conflict with other volume paths on the same build step or with certain
reserved volume paths.
"""
name = _messages.StringField(1)
path = _messages.StringField(2)
class Warning(_messages.Message):
r"""A non-fatal problem encountered during the execution of the build.
Enums:
PriorityValueValuesEnum: The priority for this warning.
Fields:
priority: The priority for this warning.
text: Explanation of the warning generated.
"""
class PriorityValueValuesEnum(_messages.Enum):
r"""The priority for this warning.
Values:
PRIORITY_UNSPECIFIED: Should not be used.
INFO: e.g. deprecation warnings and alternative feature highlights.
WARNING: e.g. automated detection of possible issues with the build.
ALERT: e.g. alerts that a feature used in the build is pending removal
"""
PRIORITY_UNSPECIFIED = 0
INFO = 1
WARNING = 2
ALERT = 3
priority = _messages.EnumField('PriorityValueValuesEnum', 1)
text = _messages.StringField(2)
class WebhookConfig(_messages.Message):
r"""WebhookConfig describes the configuration of a trigger that creates a
build whenever a webhook is sent to a trigger's webhook URL.
Enums:
StateValueValuesEnum: Potential issues with the underlying Pub/Sub
subscription configuration. Only populated on get requests.
Fields:
secret: Required. Resource name for the secret required as a URL
parameter.
state: Potential issues with the underlying Pub/Sub subscription
configuration. Only populated on get requests.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Potential issues with the underlying Pub/Sub subscription
configuration. Only populated on get requests.
Values:
STATE_UNSPECIFIED: The webhook auth configuration not been checked.
OK: The auth configuration is properly setup.
SECRET_DELETED: The secret provided in auth_method has been deleted.
"""
STATE_UNSPECIFIED = 0
OK = 1
SECRET_DELETED = 2
secret = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
class WorkerConfig(_messages.Message):
r"""Defines the configuration to be used for creating workers in the pool.
Fields:
diskSizeGb: Size of the disk attached to the worker, in GB. See [Worker
pool config file](https://cloud.google.com/build/docs/private-
pools/worker-pool-config-file-schema). Specify a value of up to 1000. If
`0` is specified, Cloud Build will use a standard disk size.
machineType: Machine type of a worker, such as `e2-medium`. See [Worker
pool config file](https://cloud.google.com/build/docs/private-
pools/worker-pool-config-file-schema). If left blank, Cloud Build will
use a sensible default.
"""
diskSizeGb = _messages.IntegerField(1)
machineType = _messages.StringField(2)
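# Sketch (hypothetical values), assuming the standard apitools keyword constructor:
# a pool worker with a 100 GB disk on an e2-medium machine would be described as
#   worker = WorkerConfig(diskSizeGb=100, machineType='e2-medium')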
class WorkerPool(_messages.Message):
r"""Configuration for a `WorkerPool`. Cloud Build owns and maintains a pool
of workers for general use; these workers have no access to a project's private
network. By default, builds submitted to Cloud Build will use a worker from
| |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from typing import Callable
import numpy as np
import pytest
from sparseml.tensorflow_v1.optim import (
GroupLearningRateModifier,
LearningRateModifier,
Modifier,
ScheduledModifierManager,
SetLearningRateModifier,
)
from sparseml.tensorflow_v1.optim.modifier import (
EXTRAS_KEY_LEARNING_RATE,
EXTRAS_KEY_SUMMARIES,
)
from sparseml.tensorflow_v1.utils import tf_compat
from tests.sparseml.tensorflow_v1.optim.test_modifier import (
ScheduledModifierTest,
mlp_graph_lambda,
)
EPSILON = 1e-7
##############################
#
# SetLearningRateModifier tests
#
##############################
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
@pytest.mark.parametrize(
"graph_lambda,modifier_lambda",
[
(
mlp_graph_lambda,
lambda: SetLearningRateModifier(learning_rate=0.1),
),
(
mlp_graph_lambda,
lambda: SetLearningRateModifier(learning_rate=0.03, start_epoch=5),
),
],
scope="function",
)
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestSetLRModifierImpl(ScheduledModifierTest):
@pytest.mark.parametrize(
"optim_lambda",
[tf_compat.train.GradientDescentOptimizer, tf_compat.train.AdamOptimizer],
)
def test_lifecycle(
self,
modifier_lambda: Callable[[], SetLearningRateModifier],
graph_lambda: Callable[[], tf_compat.Graph],
steps_per_epoch: int,
optim_lambda,
):
modifier = modifier_lambda()
graph = graph_lambda()
with graph.as_default():
global_step = tf_compat.train.get_or_create_global_step()
# Further set up for loss, optimizer and training op
x_batch = graph.get_tensor_by_name("inp:0")
y_pred = graph.get_tensor_by_name("out:0")
n_inputs = x_batch.shape[1]
n_outputs = y_pred.shape[1]
y_lab = tf_compat.placeholder(
tf_compat.float32, shape=(None, n_outputs), name="y"
)
mod_ops, mod_extras = modifier.create_ops(
steps_per_epoch, global_step=global_step, graph=graph
)
assert len(mod_ops) == 0
assert len(mod_extras) == 2
assert EXTRAS_KEY_LEARNING_RATE in mod_extras
assert EXTRAS_KEY_SUMMARIES in mod_extras
learning_rate = mod_extras[EXTRAS_KEY_LEARNING_RATE]
with tf_compat.name_scope("train"):
optimizer = optim_lambda(learning_rate=learning_rate)
loss = tf_compat.losses.mean_squared_error(y_lab, y_pred)
training_op = optimizer.minimize(loss, global_step=global_step)
np.random.seed(12)
batch_size = 8
batch_x = np.random.randn(batch_size, n_inputs)
batch_lab = np.random.randn(batch_size, n_outputs)
with tf_compat.Session(graph=graph) as sess:
sess.run(tf_compat.global_variables_initializer())
for epoch in range(int(max(modifier.start_epoch, modifier.end_epoch)) + 5):
for step in range(steps_per_epoch):
gs = sess.run(global_step)
expected = modifier.learning_rate
optim_lr = sess.run(_get_lr(optimizer))
assert (
abs(optim_lr - expected) <= EPSILON
), "Failed at epoch:{} step:{} global_step:{}".format(
epoch, step, gs
)
sess.run(
training_op,
feed_dict={x_batch: batch_x, y_lab: batch_lab},
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
def test_set_lr_yaml():
start_epoch = 10.0
set_lr = 0.1
yaml_str = """
!SetLearningRateModifier
learning_rate: {}
start_epoch: {}
""".format(
set_lr, start_epoch
)
yaml_modifier = SetLearningRateModifier.load_obj(
yaml_str
) # type: SetLearningRateModifier
serialized_modifier = SetLearningRateModifier.load_obj(
str(yaml_modifier)
) # type: SetLearningRateModifier
obj_modifier = SetLearningRateModifier(
learning_rate=set_lr, start_epoch=start_epoch
)
assert isinstance(yaml_modifier, SetLearningRateModifier)
assert (
yaml_modifier.learning_rate
== serialized_modifier.learning_rate
== obj_modifier.learning_rate
)
assert (
yaml_modifier.start_epoch
== serialized_modifier.start_epoch
== obj_modifier.start_epoch
)
##############################
#
# LearningRateModifier tests
#
##############################
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
@pytest.mark.parametrize(
"graph_lambda,modifier_lambda",
[
(
mlp_graph_lambda,
lambda: LearningRateModifier(
lr_class="ExponentialLR",
lr_kwargs={"gamma": 0.9},
start_epoch=0,
init_lr=0.1,
),
),
(
mlp_graph_lambda,
lambda: LearningRateModifier(
lr_class="ExponentialLR",
lr_kwargs={"gamma": 0.5},
start_epoch=5,
end_epoch=13,
init_lr=0.1,
),
),
],
scope="function",
)
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestLRModifierExponentialImpl(ScheduledModifierTest):
@pytest.mark.parametrize(
"optim_lambda",
[tf_compat.train.GradientDescentOptimizer, tf_compat.train.AdamOptimizer],
)
def test_lifecycle(
self,
modifier_lambda: Callable[[], SetLearningRateModifier],
graph_lambda: Callable[[], tf_compat.Graph],
steps_per_epoch: int,
optim_lambda,
):
modifier = modifier_lambda()
graph = graph_lambda()
with graph.as_default():
global_step = tf_compat.train.get_or_create_global_step()
# Further set up for loss, optimizer and training op
x_batch = graph.get_tensor_by_name("inp:0")
y_pred = graph.get_tensor_by_name("out:0")
n_inputs = x_batch.shape[1]
n_outputs = y_pred.shape[1]
y_lab = tf_compat.placeholder(
tf_compat.float32, shape=(None, n_outputs), name="y"
)
mod_ops, mod_extras = modifier.create_ops(
steps_per_epoch, global_step=global_step, graph=graph
)
assert len(mod_ops) == 0
assert len(mod_extras) == 2
assert EXTRAS_KEY_LEARNING_RATE in mod_extras
assert EXTRAS_KEY_SUMMARIES in mod_extras
learning_rate = mod_extras[EXTRAS_KEY_LEARNING_RATE]
with tf_compat.name_scope("train"):
optimizer = optim_lambda(learning_rate=learning_rate)
loss = tf_compat.losses.mean_squared_error(y_lab, y_pred)
training_op = optimizer.minimize(loss, global_step=global_step)
np.random.seed(12)
batch_size = 8
batch_x = np.random.randn(batch_size, n_inputs)
batch_lab = np.random.randn(batch_size, n_outputs)
with tf_compat.Session(graph=graph) as sess:
sess.run(tf_compat.global_variables_initializer())
for epoch in range(int(max(modifier.start_epoch, modifier.end_epoch)) + 5):
if epoch < modifier.start_epoch:
expected = modifier.init_lr
elif epoch < modifier.end_epoch or modifier.end_epoch == -1:
expected = modifier.init_lr * (
modifier.lr_kwargs["gamma"]
** (epoch - int(modifier.start_epoch))
)
else:
expected = modifier.init_lr * (
modifier.lr_kwargs["gamma"]
** int(modifier.end_epoch - modifier.start_epoch - 1)
)
for step in range(steps_per_epoch):
gs = sess.run(global_step)
optim_lr = sess.run(_get_lr(optimizer))
assert (
abs(optim_lr - expected) <= EPSILON
), "Failed at epoch:{} step:{} global_step:{}".format(
epoch, step, gs
)
sess.run(
training_op,
feed_dict={x_batch: batch_x, y_lab: batch_lab},
)
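# Worked example of the expected schedule above (illustration only): for the second
# parametrization (ExponentialLR, init_lr=0.1, gamma=0.5, start_epoch=5, end_epoch=13),
#   epoch 4  -> 0.1                        (before start_epoch)
#   epoch 7  -> 0.1 * 0.5**2 = 0.025       (exponent = epoch - start_epoch)
#   epoch 14 -> 0.1 * 0.5**7               (frozen at int(end_epoch - start_epoch - 1) = 7 decays)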
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
def test_lr_modifier_exponential_yaml():
gamma = 0.9
lr_class = "ExponentialLR"
lr_kwargs = {"gamma": gamma}
start_epoch = 10.0
end_epoch = 20.0
init_lr = 0.1
yaml_str = """
!LearningRateModifier
start_epoch: {}
end_epoch: {}
lr_class: {}
lr_kwargs: {}
init_lr: {}
""".format(
start_epoch, end_epoch, lr_class, lr_kwargs, init_lr
)
yaml_modifier = LearningRateModifier.load_obj(
yaml_str
) # type: LearningRateModifier
serialized_modifier = LearningRateModifier.load_obj(
str(yaml_modifier)
) # type: LearningRateModifier
obj_modifier = LearningRateModifier(
start_epoch=start_epoch,
end_epoch=end_epoch,
lr_class=lr_class,
lr_kwargs=lr_kwargs,
init_lr=init_lr,
)
assert isinstance(yaml_modifier, LearningRateModifier)
assert (
yaml_modifier.start_epoch
== serialized_modifier.start_epoch
== obj_modifier.start_epoch
)
assert (
yaml_modifier.end_epoch
== serialized_modifier.end_epoch
== obj_modifier.end_epoch
)
assert (
yaml_modifier.update_frequency
== serialized_modifier.update_frequency
== obj_modifier.update_frequency
)
assert (
yaml_modifier.lr_class == serialized_modifier.lr_class == obj_modifier.lr_class
)
assert (
yaml_modifier.lr_kwargs
== serialized_modifier.lr_kwargs
== obj_modifier.lr_kwargs
)
assert yaml_modifier.init_lr == serialized_modifier.init_lr == obj_modifier.init_lr
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
@pytest.mark.parametrize(
"graph_lambda,modifier_lambda",
[
(
mlp_graph_lambda,
lambda: LearningRateModifier(
lr_class="StepLR",
lr_kwargs={"gamma": 0.9, "step_size": 3},
start_epoch=0,
init_lr=0.1,
),
),
(
mlp_graph_lambda,
lambda: LearningRateModifier(
lr_class="StepLR",
lr_kwargs={"gamma": 0.5, "step_size": 2},
start_epoch=5,
end_epoch=11,
init_lr=0.01,
),
),
],
scope="function",
)
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestLRModifierStepImpl(ScheduledModifierTest):
@pytest.mark.parametrize(
"optim_lambda",
[tf_compat.train.GradientDescentOptimizer, tf_compat.train.AdamOptimizer],
)
def test_lifecycle(
self,
modifier_lambda: Callable[[], SetLearningRateModifier],
graph_lambda: Callable[[], tf_compat.Graph],
steps_per_epoch: int,
optim_lambda,
):
modifier = modifier_lambda()
graph = graph_lambda()
with graph.as_default():
global_step = tf_compat.train.get_or_create_global_step()
# Further set up for loss, optimizer and training op
x_batch = graph.get_tensor_by_name("inp:0")
y_pred = graph.get_tensor_by_name("out:0")
n_inputs = x_batch.shape[1]
n_outputs = y_pred.shape[1]
y_lab = tf_compat.placeholder(
tf_compat.float32, shape=(None, n_outputs), name="y"
)
mod_ops, mod_extras = modifier.create_ops(
steps_per_epoch, global_step=global_step, graph=graph
)
assert len(mod_ops) == 0
assert len(mod_extras) == 2
assert EXTRAS_KEY_LEARNING_RATE in mod_extras
assert EXTRAS_KEY_SUMMARIES in mod_extras
learning_rate = mod_extras[EXTRAS_KEY_LEARNING_RATE]
with tf_compat.name_scope("train"):
optimizer = optim_lambda(learning_rate=learning_rate)
loss = tf_compat.losses.mean_squared_error(y_lab, y_pred)
training_op = optimizer.minimize(loss, global_step=global_step)
np.random.seed(12)
batch_size = 8
batch_x = np.random.randn(batch_size, n_inputs)
batch_lab = np.random.randn(batch_size, n_outputs)
with tf_compat.Session(graph=graph) as sess:
sess.run(tf_compat.global_variables_initializer())
for epoch in range(int(max(modifier.start_epoch, modifier.end_epoch)) + 5):
if epoch < modifier.start_epoch:
expected = modifier.init_lr
elif epoch < modifier.end_epoch or modifier.end_epoch == -1:
expected = modifier.init_lr * (
modifier.lr_kwargs["gamma"]
** math.floor(
(epoch - modifier.start_epoch)
/ modifier.lr_kwargs["step_size"]
)
)
else:
expected = modifier.init_lr * (
modifier.lr_kwargs["gamma"]
** (
math.floor(
(modifier.end_epoch - modifier.start_epoch)
/ modifier.lr_kwargs["step_size"]
)
- 1
)
)
for step in range(steps_per_epoch):
gs = sess.run(global_step)
optim_lr = sess.run(_get_lr(optimizer))
assert (
abs(optim_lr - expected) <= EPSILON
), "Failed at epoch:{} step:{} global_step:{}".format(
epoch, step, gs
)
sess.run(
training_op,
feed_dict={x_batch: batch_x, y_lab: batch_lab},
)
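# Worked example of the StepLR expectation above (illustration only): for the second
# parametrization (init_lr=0.01, gamma=0.5, step_size=2, start_epoch=5, end_epoch=11),
#   epoch 4       -> 0.01                                  (before start_epoch)
#   epoch 8       -> 0.01 * 0.5**floor((8-5)/2)  = 0.005
#   epoch >= 11   -> 0.01 * 0.5**(floor((11-5)/2) - 1) = 0.0025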
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
def test_lr_modifier_step_yaml():
gamma = 0.9
lr_class = "StepLR"
lr_kwargs = {"step_size": 1.0, "gamma": gamma}
start_epoch = 10.0
end_epoch = 20.0
init_lr = 0.1
yaml_str = """
!LearningRateModifier
start_epoch: {}
end_epoch: {}
lr_class: {}
lr_kwargs: {}
init_lr: {}
""".format(
start_epoch, end_epoch, lr_class, lr_kwargs, init_lr
)
yaml_modifier = LearningRateModifier.load_obj(
yaml_str
) # type: LearningRateModifier
serialized_modifier = LearningRateModifier.load_obj(
str(yaml_modifier)
) # type: LearningRateModifier
obj_modifier = LearningRateModifier(
start_epoch=start_epoch,
end_epoch=end_epoch,
lr_class=lr_class,
lr_kwargs=lr_kwargs,
init_lr=init_lr,
)
assert isinstance(yaml_modifier, LearningRateModifier)
assert (
yaml_modifier.start_epoch
== serialized_modifier.start_epoch
== obj_modifier.start_epoch
)
assert (
yaml_modifier.end_epoch
== serialized_modifier.end_epoch
== obj_modifier.end_epoch
)
assert (
yaml_modifier.update_frequency
== serialized_modifier.update_frequency
== obj_modifier.update_frequency
)
assert (
yaml_modifier.lr_class == serialized_modifier.lr_class == obj_modifier.lr_class
)
assert (
yaml_modifier.lr_kwargs
== serialized_modifier.lr_kwargs
== obj_modifier.lr_kwargs
)
assert yaml_modifier.init_lr == serialized_modifier.init_lr == obj_modifier.init_lr
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
@pytest.mark.parametrize(
"graph_lambda,modifier_lambda",
[
(
mlp_graph_lambda,
lambda: LearningRateModifier(
lr_class="MultiStepLR",
lr_kwargs={"gamma": 0.9, "milestones": [1, 3, 4]},
start_epoch=0,
init_lr=0.1,
),
),
(
mlp_graph_lambda,
lambda: LearningRateModifier(
lr_class="MultiStepLR",
lr_kwargs={"gamma": 0.95, "milestones": [5, 8]},
start_epoch=3,
end_epoch=13,
init_lr=0.1,
),
),
],
scope="function",
)
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestLRModifierMultiStepImpl(ScheduledModifierTest):
@pytest.mark.parametrize(
"optim_lambda",
[tf_compat.train.GradientDescentOptimizer, tf_compat.train.AdamOptimizer],
)
def test_lifecycle(
self,
modifier_lambda: Callable[[], LearningRateModifier],
graph_lambda: Callable[[], tf_compat.Graph],
steps_per_epoch: int,
optim_lambda,
):
modifier = modifier_lambda()
graph = graph_lambda()
with graph.as_default():
global_step = tf_compat.train.get_or_create_global_step()
# Further set up for loss, optimizer and training op
x_batch = graph.get_tensor_by_name("inp:0")
y_pred = graph.get_tensor_by_name("out:0")
n_inputs = x_batch.shape[1]
n_outputs = y_pred.shape[1]
y_lab = tf_compat.placeholder(
tf_compat.float32, shape=(None, n_outputs), name="y"
)
mod_ops, mod_extras = modifier.create_ops(
steps_per_epoch, global_step=global_step, graph=graph
)
assert len(mod_ops) == 0
assert len(mod_extras) == 2
assert EXTRAS_KEY_LEARNING_RATE in mod_extras
assert EXTRAS_KEY_SUMMARIES in mod_extras
learning_rate = mod_extras[EXTRAS_KEY_LEARNING_RATE]
with tf_compat.name_scope("train"):
optimizer = optim_lambda(learning_rate=learning_rate)
loss = tf_compat.losses.mean_squared_error(y_lab, y_pred)
training_op = optimizer.minimize(loss, global_step=global_step)
np.random.seed(12)
batch_size = 8
batch_x = np.random.randn(batch_size, n_inputs)
batch_lab = np.random.randn(batch_size, n_outputs)
with tf_compat.Session(graph=graph) as sess:
sess.run(tf_compat.global_variables_initializer())
for epoch in range(int(max(modifier.start_epoch, modifier.end_epoch)) + 5):
if epoch < modifier.start_epoch:
expected = modifier.init_lr
else:
num_gammas = sum(
[
1
for mile in modifier.lr_kwargs["milestones"]
if epoch >= mile
| |
import rlkit.torch.pytorch_util as ptu
from rlkit.envs.blocks.mujoco.block_pick_and_place_v2_retry import BlockPickAndPlaceEnv
from rlkit.launchers.launcher_util import run_experiment
import numpy as np
from torch.distributions import Normal
import pickle
import torch
import torch.nn as nn
from argparse import ArgumentParser
import imageio
from rlkit.envs.blocks.mujoco.block_stacking_env import BlockEnv
from rlkit.core import logger
from torchvision.utils import save_image
from rlkit.util.plot import plot_multi_image
import json
import os
import rlkit.torch.iodine.iodine as iodine
from collections import OrderedDict
class Cost:
def __init__(self, type, logger_prefix_dir):
self.type = type
self.remove_goal_latents = False
self.logger_prefix_dir = logger_prefix_dir
def best_action(self, mpc_step, goal_latents, goal_latents_recon, goal_image, pred_latents,
pred_latents_recon, pred_image, actions):
if self.type == 'min_min_latent':
return self.min_min_latent(mpc_step, goal_latents, goal_latents_recon, goal_image,
pred_latents, pred_latents_recon, pred_image, actions)
if self.type == 'sum_goal_min_latent':
self.remove_goal_latents = False
return self.sum_goal_min_latent(mpc_step, goal_latents, goal_latents_recon, goal_image,
pred_latents, pred_latents_recon, pred_image, actions)
elif self.type == 'goal_pixel':
return self.goal_pixel(goal_latents, goal_image, pred_latents, pred_image, actions)
elif self.type == 'latent_pixel':
return self.latent_full_pixel(mpc_step, goal_latents_recon, goal_image,
pred_latents_recon,
pred_image, actions)
else:
raise Exception
def mse(self, l1, l2):
# l1 is (..., rep_size) l2 is (..., rep_size)
return torch.pow(l1 - l2, 2).mean(-1)
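# For example (shapes only, illustrative): broadcasting a single goal latent of shape
# (1, 1, rep_size) against predicted latents of shape (n_actions, K, rep_size) yields a
# per-(action, latent-slot) cost of shape (n_actions, K).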
def min_min_latent(self, mpc_step, goal_latents, goal_latents_recon, goal_image,
pred_latents, pred_latents_recon, pred_image, actions):
# obs_latents is (n_actions, K, rep_size)
# pred_obs is (n_actions, 3, imsize, imsize)
best_goal_idx = 0
best_action_idx = 0
best_cost = np.inf
best_latent_idx = 0
n_actions = actions.shape[0]
K = pred_latents_recon.shape[1]
rep_size = 128
# Compare against each goal latent
costs = []
costs_latent = []
latent_idxs = []
for i in range(goal_latents.shape[0]):
cost = torch.pow(goal_latents[i].view(1, 1, rep_size) - pred_latents,
2).mean(2)
costs_latent.append(cost)
cost, latent_idx = cost.min(-1) # take min among K
costs.append(cost)
latent_idxs.append(latent_idx)
min_cost, action_idx = cost.min(0) # take min among n_actions
if min_cost <= best_cost:
best_goal_idx = i
best_action_idx = action_idx
best_cost = min_cost
best_latent_idx = latent_idx[action_idx]
costs = torch.stack(costs) # (n_goal_latents, n_actions )
latent_idxs = torch.stack(latent_idxs) # (n_goal_latents, n_actions )
matching_costs, matching_goal_idx = costs.min(0)
matching_latent_idx = latent_idxs[matching_goal_idx, np.arange(n_actions)]
matching_goal_rec = torch.stack([goal_latents_recon[j] for j in matching_goal_idx])
matching_latent_rec = torch.stack(
[pred_latents_recon[i][matching_latent_idx[i]] for i in range(n_actions)])
best_pred_obs = ptu.get_numpy(pred_image[best_action_idx])
full_plot = torch.cat([pred_image.unsqueeze(0),
pred_latents_recon.permute(1, 0, 2, 3, 4),
matching_latent_rec.unsqueeze(0),
matching_goal_rec.unsqueeze(0)], 0)
best_action_idxs = costs.min(0)[0].sort()[1]
plot_size = 8
full_plot = full_plot[:, ptu.get_numpy(best_action_idxs[:plot_size])]
caption = np.zeros(full_plot.shape[:2])
caption[1:1 + K, :] = ptu.get_numpy(torch.stack(costs_latent).min(0)[0].permute(1, 0))[:,
:plot_size]
caption[-2, :] = matching_costs.cpu().numpy()[ptu.get_numpy(best_action_idxs[:plot_size])]
plot_multi_image(ptu.get_numpy(full_plot),
logger.get_snapshot_dir() + '%s/mpc_pred_%d.png' % (
self.logger_prefix_dir, mpc_step),
caption=caption)
return best_pred_obs, actions[ptu.get_numpy(best_action_idxs)], best_goal_idx
def sum_goal_min_latent(self, mpc_step, goal_latents, goal_latents_recon, goal_image,
pred_latents, pred_latents_recon, pred_image, actions):
# obs_latents is (n_actions, K, rep_size)
# pred_obs is (n_actions, 3, imsize, imsize)
best_goal_idx = 0 # not meaningful for this cost; kept so the return signature matches the other costs
n_actions = actions.shape[0]
K = pred_latents_recon.shape[1]
# Compare against each goal latent
costs = []
costs_latent = []
latent_idxs = []
for i in range(goal_latents.shape[0]):
cost = self.mse(goal_latents[i].view(1, 1, -1), pred_latents) # cost is (n_actions, K)
costs_latent.append(cost)
cost, latent_idx = cost.min(-1) # take min among K
costs.append(cost)
latent_idxs.append(latent_idx)
costs = torch.stack(costs)
latent_idxs = torch.stack(latent_idxs) # (n_goal_latents, n_actions )
best_action_idxs = costs.sum(0).sort()[1]
best_pred_obs = ptu.get_numpy(pred_image[best_action_idxs[0]])
matching_costs, matching_goal_idx = costs.min(0)
matching_latent_idx = latent_idxs[matching_goal_idx, np.arange(n_actions)]
matching_goal_rec = torch.stack([goal_latents_recon[j] for j in matching_goal_idx])
matching_latent_rec = torch.stack(
[pred_latents_recon[i][matching_latent_idx[i]] for i in range(n_actions)])
full_plot = torch.cat([pred_image.unsqueeze(0),
pred_latents_recon.permute(1, 0, 2, 3, 4),
matching_latent_rec.unsqueeze(0),
matching_goal_rec.unsqueeze(0)], 0)
plot_size = 8
full_plot = full_plot[:, :plot_size]
caption = np.zeros(full_plot.shape[:2])
caption[1:1 + K, :] = ptu.get_numpy(torch.stack(costs_latent).min(0)[0].permute(1, 0))[:,
:plot_size]
caption[-2, :] = matching_costs.cpu().numpy()[:plot_size]
plot_multi_image(ptu.get_numpy(full_plot),
logger.get_snapshot_dir() + '%s/mpc_pred_%d.png' % (
self.logger_prefix_dir, mpc_step),
caption=caption)
return best_pred_obs, actions[ptu.get_numpy(best_action_idxs)], best_goal_idx
def goal_pixel(self, goal_latents, goal_image, pred_latents, pred_image, actions):
mse = torch.pow(pred_image - goal_image, 2).mean(3).mean(2).mean(1)
min_cost, action_idx = mse.min(0)
return ptu.get_numpy(pred_image[action_idx]), actions[action_idx], 0
def latent_pixel(self, mpc_step, goal_latents_recon, goal_image, pred_latents_recon, pred_image,
actions):
# obs_latents is (n_actions, K, rep_size)
# pred_obs is (n_actions, 3, imsize, imsize)
best_goal_idx = 0
best_action_idx = 0
best_cost = np.inf
best_latent_idx = 0
n_actions = actions.shape[0]
K = pred_latents_recon.shape[1]
imshape = (3, 64, 64)
# Compare against each goal latent
costs = []
costs_latent = []
latent_idxs = []
for i in range(goal_latents_recon.shape[0]):
cost = torch.pow(goal_latents_recon[i].view(1, 1, *imshape) - pred_latents_recon,
2).mean(4).mean(3).mean(2)
costs_latent.append(cost)
cost, latent_idx = cost.min(-1) # take min among K
costs.append(cost)
latent_idxs.append(latent_idx)
min_cost, action_idx = cost.min(0) # take min among n_actions
if min_cost <= best_cost:
best_goal_idx = i
best_action_idx = action_idx
best_cost = min_cost
best_latent_idx = latent_idx[action_idx]
costs = torch.stack(costs) # (n_goal_latents, n_actions )
latent_idxs = torch.stack(latent_idxs) # (n_goal_latents, n_actions )
best_action_idxs = costs.min(0)[0].sort()[1]
matching_costs, matching_goal_idx = costs.min(0)
matching_latent_idx = latent_idxs[matching_goal_idx, np.arange(n_actions)]
matching_goal_rec = torch.stack([goal_latents_recon[j] for j in matching_goal_idx])
matching_latent_rec = torch.stack(
[pred_latents_recon[i][matching_latent_idx[i]] for i in range(n_actions)])
best_pred_obs = ptu.get_numpy(pred_image[best_action_idx])
full_plot = torch.cat([pred_image.unsqueeze(0),
pred_latents_recon.permute(1, 0, 2, 3, 4),
matching_latent_rec.unsqueeze(0),
matching_goal_rec.unsqueeze(0)], 0)
plot_size = 8
full_plot = full_plot[:, ptu.get_numpy(best_action_idxs[:plot_size])]
caption = np.zeros(full_plot.shape[:2])
caption[1:1 + K, :] = ptu.get_numpy(torch.stack(costs_latent).min(0)[0].permute(1, 0))[:,
:plot_size]
caption[-2, :] = matching_costs.cpu().numpy()[ptu.get_numpy(best_action_idxs[:plot_size])]
plot_multi_image(ptu.get_numpy(full_plot),
logger.get_snapshot_dir() + '%s/mpc_pred_%d.png' % (
self.logger_prefix_dir, mpc_step),
caption=caption)
return best_pred_obs, actions[ptu.get_numpy(best_action_idxs)], best_goal_idx
def latent_full_pixel(self, mpc_step, goal_latents_recon, goal_image, pred_latents_recon,
pred_image,
actions):
# obs_latents is (n_actions, K, rep_size)
# pred_obs is (n_actions, 3, imsize, imsize)
best_goal_idx = 0
best_action_idx = 0
best_cost = np.inf
best_latent_idx = 0
n_actions = actions.shape[0]
K = pred_latents_recon.shape[1]
imshape = (3, 64, 64)
# Compare against each goal latent
costs = []
costs_latent = []
latent_idxs = []
for i in range(goal_latents_recon.shape[0]):
cost = torch.pow(goal_latents_recon[i].view(1, 1, *imshape) - pred_latents_recon,
2).mean(4).mean(3).mean(2)
costs_latent.append(cost)
cost, latent_idx = cost.min(-1) # take min among K
costs.append(cost)
latent_idxs.append(latent_idx)
min_cost, action_idx = cost.min(0) # take min among n_actions
if min_cost <= best_cost:
best_goal_idx = i
best_action_idx = action_idx
best_cost = min_cost
best_latent_idx = latent_idx[action_idx]
costs = torch.stack(costs) # (n_goal_latents, n_actions )
latent_idxs = torch.stack(latent_idxs) # (n_goal_latents, n_actions )
best_action_idxs = costs.min(0)[0].sort()[1]
matching_costs, matching_goal_idx = costs.min(0)
matching_latent_idx = latent_idxs[matching_goal_idx, np.arange(n_actions)]
matching_goal_rec = torch.stack([goal_latents_recon[j] for j in matching_goal_idx])
matching_latent_rec = torch.stack(
[pred_latents_recon[i][matching_latent_idx[i]] for i in range(n_actions)])
best_pred_obs = ptu.get_numpy(pred_image[best_action_idx])
full_plot = torch.cat([pred_image.unsqueeze(0),
pred_latents_recon.permute(1, 0, 2, 3, 4),
matching_latent_rec.unsqueeze(0),
matching_goal_rec.unsqueeze(0)], 0)
plot_size = 8
mse = torch.pow(pred_image - goal_image, 2).mean(3).mean(2).mean(1)
best_action_idxs = mse.sort()[1]
full_plot = full_plot[:, ptu.get_numpy(best_action_idxs[:plot_size])]
caption = np.zeros(full_plot.shape[:2])
caption[1:1 + K, :] = ptu.get_numpy(torch.stack(costs_latent).min(0)[0].permute(1, 0))[:,
:plot_size]
caption[-2, :] = matching_costs.cpu().numpy()[ptu.get_numpy(best_action_idxs[:plot_size])]
plot_multi_image(ptu.get_numpy(full_plot),
logger.get_snapshot_dir() + '%s/mpc_pred_%d.png' % (
self.logger_prefix_dir, mpc_step),
caption=caption)
return best_pred_obs, actions[ptu.get_numpy(best_action_idxs)], best_goal_idx
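# Rough calling sketch (hypothetical values, not from the original source): with
# n_actions candidate actions, K latent slots and 64x64 RGB reconstructions,
#   cost = Cost('latent_pixel', logger_prefix_dir='/run0')
#   best_obs, ranked_actions, goal_idx = cost.best_action(
#       mpc_step, goal_latents, goal_latents_recon, goal_image,
#       pred_latents, pred_latents_recon, pred_image, actions)
# returns the predicted image for the best action, the actions reordered from best to
# worst, and the index of the matched goal latent.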
class MPC:
def __init__(self, model, env, n_actions, mpc_steps,
n_goal_objs=3,
cost_type='latent_pixel',
filter_goals=False,
true_actions=None,
logger_prefix_dir=None,
mpc_style="random_shooting", # options are random_shooting, cem
cem_steps=2,
use_action_image=True, # True for stage 1, False for stage 3
):
self.model = model
self.env = env
self.n_actions = n_actions
self.mpc_steps = mpc_steps
self.cost_type = cost_type
self.filter_goals = filter_goals
self.cost = Cost(self.cost_type, logger_prefix_dir)
self.true_actions = true_actions
self.n_goal_objs = n_goal_objs
self.mpc_style = mpc_style
self.cem_steps = cem_steps
self.multi_steps = 2
if logger_prefix_dir is not None:
os.mkdir(logger.get_snapshot_dir() + logger_prefix_dir)
self.logger_prefix_dir = logger_prefix_dir
self.use_action_image = use_action_image
def filter_goal_latents(self, goal_latents, goal_latents_mask, goal_latents_recon):
# Keep top goal latents with highest mask area except first
n_goals = self.n_goal_objs
goal_latents_mask[goal_latents_mask < 0.5] = 0
vals, keep = torch.sort(goal_latents_mask.mean(2).mean(1),
descending=True)
goal_latents_recon[keep[n_goals]] += goal_latents_recon[keep[n_goals + 1]]
keep = keep[1:1 + n_goals]
goal_latents = torch.stack([goal_latents[i] for i in keep])
goal_latents_recon = torch.stack([goal_latents_recon[i] for i in keep])
save_image(goal_latents_recon,
logger.get_snapshot_dir() + '%s/mpc_goal_latents_recon.png' %
self.logger_prefix_dir)
# print(vals)
# import pdb; pdb.set_trace()
return goal_latents, goal_latents_recon
def remove_idx(self, array, idx):
return torch.stack([array[i] for i in set(range(array.shape[0])) - set([idx])])
def run(self, goal_image):
goal_image_tensor = ptu.from_numpy(np.moveaxis(goal_image, 2, 0)).unsqueeze(
0).float()[:, :3] / 255. # (1, 3, imsize, imsize)
rec_goal_image, goal_latents, goal_latents_recon, goal_latents_mask = self.model.refine(
goal_image_tensor,
hidden_state=None,
plot_latents=True) # (K, rep_size)
# Keep top 4 goal latents with greatest mask area excluding 1st (background)
if self.filter_goals:
goal_latents, goal_latents_recon = self.filter_goal_latents(goal_latents,
goal_latents_mask,
goal_latents_recon)
true_actions = self.env.move_blocks_side()
#self.true_actions = true_actions
#self.env.initialize(True)
#self.env.reset()
#for i in range(20):
# self.env.step(self.env.sample_action())
obs = self.env.get_observation()
import matplotlib.pyplot as plt
# for i in range(4):
# action = true_actions[i]
#
# print(action)
# obs = self.env.step(action)
# imageio.imsave(logger.get_snapshot_dir() + '%s/action_%d.png' %
# (self.logger_prefix_dir, i), obs)
#import pdb; pdb.set_trace()
#true_actions[:, 2] = 0.2
#true_actions[:, -1] = 3.5
imageio.imsave(logger.get_snapshot_dir() + '%s/initial_image.png' %
self.logger_prefix_dir, obs)
obs_lst = [np.moveaxis(goal_image.astype(np.float32) / 255., 2, 0)[:3]]
pred_obs_lst = [ptu.get_numpy(rec_goal_image)]
actions = []
for mpc_step in range(self.mpc_steps):
pred_obs, action, goal_idx = self.step_mpc(obs, goal_latents, goal_image_tensor,
mpc_step,
goal_latents_recon)
actions.append(action)
obs = self.env.step(action)
pred_obs_lst.append(pred_obs)
obs_lst.append(np.moveaxis(obs, 2, 0))
if goal_latents.shape[0] == 1:
break
# remove matching goal latent from goal latents
if self.cost.remove_goal_latents:
goal_latents = self.remove_idx(goal_latents, goal_idx)
goal_latents_recon = self.remove_idx(goal_latents_recon, goal_idx)
save_image(ptu.from_numpy(np.stack(obs_lst + pred_obs_lst)),
logger.get_snapshot_dir() + '%s/mpc.png' % self.logger_prefix_dir,
nrow=len(obs_lst))
# Compare final obs to goal obs
mse = np.square(ptu.get_numpy(goal_image_tensor.squeeze().permute(1, 2, 0)) - obs).mean()
| |
# Copyright (c) 2017 The Khronos Group Inc.
# Modifications Copyright (c) 2017-2019 Soft8Soft LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import bpy
import os
from pluginUtils.log import printLog
import pluginUtils.gltf as gltf
from .utils import *
def getUsedMaterials():
"""
Gathers and returns all unfiltered, valid Blender materials.
"""
materials = []
for bl_mat in bpy.data.materials:
materials.append(bl_mat)
return materials
def getImageIndex(exportSettings, uri):
"""
Return the image index in the glTF array.
"""
if exportSettings['uri_data'] is None:
return -1
if uri in exportSettings['uri_data']['uri']:
return exportSettings['uri_data']['uri'].index(uri)
return -1
def getTextureIndexByTexture(exportSettings, glTF, bl_texture):
"""
Return the texture index in the glTF array by a given texture. Safer than
"getTextureIndex" in case of different textures with the same image or linked textures with
the same name but with different images.
"""
if (exportSettings['uri_data'] is None or glTF.get('textures') is None
or bl_texture is None):
return -1
bl_image = getTexImage(bl_texture)
if bl_image is None or bl_image.filepath is None:
return -1
uri = getImageExportedURI(exportSettings, bl_image)
image_uri = exportSettings['uri_data']['uri']
tex_name = getTextureName(bl_texture)
index = 0
for texture in glTF['textures']:
if 'source' in texture and 'name' in texture:
current_image_uri = image_uri[texture['source']]
if current_image_uri == uri and texture['name'] == tex_name:
return index
index += 1
return -1
def getTextureIndexNode(exportSettings, glTF, name, shaderNode):
"""
Return the texture index in the glTF array.
"""
if shaderNode is None:
return -1
if not isinstance(shaderNode, (bpy.types.ShaderNodeBsdfPrincipled,
bpy.types.ShaderNodeMixShader,
bpy.types.ShaderNodeGroup)):
return -1
if shaderNode.inputs.get(name) is None:
return -1
if len(shaderNode.inputs[name].links) == 0:
return -1
fromNode = shaderNode.inputs[name].links[0].from_node
if isinstance(fromNode, bpy.types.ShaderNodeNormalMap):
if len(fromNode.inputs['Color'].links) > 0:
fromNode = fromNode.inputs['Color'].links[0].from_node
else:
return -1
if isinstance(fromNode, bpy.types.ShaderNodeSeparateRGB):
if len(fromNode.inputs['Image'].links) > 0:
fromNode = fromNode.inputs['Image'].links[0].from_node
else:
return -1
# color factor
if isinstance(fromNode, bpy.types.ShaderNodeMixRGB) and fromNode.blend_type == 'MULTIPLY':
if len(fromNode.inputs['Color1'].links) > 0:
fromNode = fromNode.inputs['Color1'].links[0].from_node
elif len(fromNode.inputs['Color2'].links) > 0:
fromNode = fromNode.inputs['Color2'].links[0].from_node
else:
return -1
if not isinstance(fromNode, bpy.types.ShaderNodeTexImage):
return -1
if getTexImage(fromNode) is None or getTexImage(fromNode).size[0] == 0 or getTexImage(fromNode).size[1] == 0:
return -1
return getTextureIndexByTexture(exportSettings, glTF, fromNode)
def getTexcoordIndex(glTF, name, shaderNode):
"""
Return the texture coordinate index, if assigned and used.
"""
if shaderNode is None:
return 0
if not isinstance(shaderNode, (bpy.types.ShaderNodeBsdfPrincipled,
bpy.types.ShaderNodeMixShader,
bpy.types.ShaderNodeGroup)):
return 0
if shaderNode.inputs.get(name) is None:
return 0
if len(shaderNode.inputs[name].links) == 0:
return 0
fromNode = shaderNode.inputs[name].links[0].from_node
if isinstance(fromNode, bpy.types.ShaderNodeNormalMap):
fromNode = fromNode.inputs['Color'].links[0].from_node
if isinstance(fromNode, bpy.types.ShaderNodeSeparateRGB):
fromNode = fromNode.inputs['Image'].links[0].from_node
if isinstance(fromNode, bpy.types.ShaderNodeMixRGB) and fromNode.blend_type == 'MULTIPLY':
if len(fromNode.inputs['Color1'].links) > 0:
fromNode = fromNode.inputs['Color1'].links[0].from_node
elif len(fromNode.inputs['Color2'].links) > 0:
fromNode = fromNode.inputs['Color2'].links[0].from_node
if not isinstance(fromNode, bpy.types.ShaderNodeTexImage):
return 0
if len(fromNode.inputs['Vector'].links) == 0:
return 0
inputNode = fromNode.inputs['Vector'].links[0].from_node
if not isinstance(inputNode, bpy.types.ShaderNodeUVMap):
return 0
if inputNode.uv_map == '':
return 0
# try to gather map index.
for bl_mesh in bpy.data.meshes:
texCoordIndex = bl_mesh.uv_layers.find(inputNode.uv_map)
if texCoordIndex >= 0:
return texCoordIndex
return 0
def getMaterialType(bl_mat):
"""
    Get the Blender material type: PBR, CYCLES or BASIC.
"""
    if not bl_mat.use_nodes or bl_mat.node_tree is None:
return 'BASIC'
for bl_node in bl_mat.node_tree.nodes:
if (isinstance(bl_node, bpy.types.ShaderNodeGroup) and
bl_node.node_tree.name.startswith('Verge3D PBR')):
return 'PBR'
if bl_mat.v3d.gltf_compat:
return 'PBR'
return 'CYCLES'
def getSkinIndex(glTF, name, index_offset):
"""
Return the skin index in the glTF array.
"""
if glTF.get('skins') is None:
return -1
skeleton = gltf.getNodeIndex(glTF, name)
index = 0
for skin in glTF['skins']:
if skin['skeleton'] == skeleton:
return index + index_offset
index += 1
return -1
def getCameraIndex(glTF, name):
"""
Return the camera index in the glTF array.
"""
if glTF.get('cameras') is None:
return -1
index = 0
for camera in glTF['cameras']:
if camera['name'] == name:
return index
index += 1
return -1
def getCurveIndex(glTF, name):
"""
Return the curve index in the glTF array.
"""
v3dExt = gltf.getAssetExtension(glTF, 'S8S_v3d_data')
    if v3dExt is None:
        return -1
    if v3dExt.get('curves') is None:
        return -1
curves = v3dExt['curves']
index = 0
for curve in curves:
if curve['name'] == name:
return index
index += 1
return -1
def getNodeGraphIndex(glTF, name):
"""
Return the node graph index in the glTF array.
"""
v3dExt = gltf.getAssetExtension(glTF, 'S8S_v3d_data')
    if v3dExt is None:
        return -1
    if v3dExt.get('nodeGraphs') is None:
        return -1
index = 0
for graph in v3dExt['nodeGraphs']:
if graph['name'] == name:
return index
index += 1
return -1
def getImageExportedURI(exportSettings, bl_image):
"""
Return exported URI for a blender image.
"""
name, ext = os.path.splitext(bpy.path.basename(bl_image.filepath))
uri_name = name if name != '' else 'v3d_exported_image_' + bl_image.name
uri_ext = ''
if (bl_image.file_format == 'JPEG'
or bl_image.file_format == 'BMP'
or bl_image.file_format == 'HDR'
or bl_image.file_format == 'PNG'):
if ext != '':
uri_ext = ext
else:
uri_ext = '.png'
uri_data = exportSettings['uri_data']
unique_uri = uri_name + uri_ext
i = 0
while unique_uri in uri_data['uri']:
index = uri_data['uri'].index(unique_uri)
if uri_data['bl_datablocks'][index] == bl_image:
break
i += 1
unique_uri = uri_name + '_' + integerToBlSuffix(i) + uri_ext
return unique_uri
def getImageExportedMimeType(bl_image):
if bl_image.file_format == 'JPEG':
return 'image/jpeg'
elif bl_image.file_format == 'BMP':
return 'image/bmp'
elif bl_image.file_format == 'HDR':
return 'image/vnd.radiance'
else:
return 'image/png'
def getNameInBrackets(data_path):
"""
    Return the name enclosed in ["..."] within a given Blender data path.
"""
if data_path is None:
return None
index = data_path.find("[\"")
if (index == -1):
return None
node_name = data_path[(index + 2):]
index = node_name.find("\"")
if (index == -1):
return None
return node_name[:(index)]
def getAnimParamDim(fcurves, node_name):
dim = 0
for fcurve in fcurves:
if getNameInBrackets(fcurve.data_path) == node_name:
dim = max(dim, fcurve.array_index+1)
return dim
def getAnimParam(data_path):
"""
return animated param in data path:
nodes['name'].outputs[0].default_value -> default_value
"""
index = data_path.rfind('.')
if index == -1:
return data_path
return data_path[(index + 1):]
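# A minimal usage sketch for the two data-path helpers above, getNameInBrackets
# and getAnimParam (the example data path is hypothetical and only illustrates
# the parsing):
#
#     path = 'nodes["RGB.001"].outputs[0].default_value'
#     getNameInBrackets(path)   # -> 'RGB.001'
#     getAnimParam(path)        # -> 'default_value'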
def getScalar(default_value, init_value = 0.0):
"""
Return scalar with a given default/fallback value.
"""
return_value = init_value
if default_value is None:
return return_value
return_value = default_value
return return_value
def getVec2(default_value, init_value = [0.0, 0.0]):
"""
Return vec2 with a given default/fallback value.
"""
return_value = init_value.copy()
if default_value is None or len(default_value) < 2:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 2:
return return_value
return return_value
def getVec3(default_value, init_value = [0.0, 0.0, 0.0]):
"""
Return vec3 with a given default/fallback value.
"""
return_value = init_value.copy()
if default_value is None or len(default_value) < 3:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 3:
return return_value
return return_value
def getVec4(default_value, init_value = [0.0, 0.0, 0.0, 1.0]):
"""
Return vec4 with a given default/fallback value.
"""
return_value = init_value.copy()
if default_value is None or len(default_value) < 4:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 4:
return return_value
return return_value
def getIndex(list, name):
"""
Return index of a glTF element by a given name.
"""
if list is None or name is None:
return -1
    for index, element in enumerate(list):
        # Unnamed elements still occupy a slot in the glTF array, so enumerate
        # keeps the returned index aligned with the element's real position.
        if element.get('name') is None:
            continue
        if element['name'] == name:
            return index
return -1
def getByName(list, name):
"""
Return element by a given name.
"""
if list is None or name is None:
return None
for element in list:
if element.get('name') is None:
continue
if element['name'] == name:
return element
return None
def getOrCreateDefaultMatIndex(glTF):
def_idx = gltf.getMaterialIndex(glTF, DEFAULT_MAT_NAME)
if def_idx == -1:
if 'materials' not in glTF:
glTF['materials'] = []
glTF['materials'].append(createDefaultMaterialCycles())
def_idx = len(glTF['materials']) - 1
return def_idx
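# A hedged usage sketch for getOrCreateDefaultMatIndex (the glTF dict below is
# hypothetical, and it assumes gltf.getMaterialIndex simply returns -1 when the
# default material is not present yet):
#
#     gltf_doc = {}
#     idx = getOrCreateDefaultMatIndex(gltf_doc)
#     # -> appends createDefaultMaterialCycles() to gltf_doc['materials'] and
#     #    returns its index (0 for an otherwise empty document)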
def createDefaultMaterialCycles():
return {
"emissiveFactor" : [
0.0,
0.0,
0.0
],
"extensions" : {
"S8S_v3d_material_data" : {
"nodeGraph" : {
"edges" : [
{
"fromNode" : 1,
"fromOutput" : 0,
"toInput" : 0,
"toNode" : 0
}
],
"nodes" : [
{
"inputs" : [
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
0.0
],
# plonetheme/zopeorg/setuphandlers.py
# -*- coding: utf-8 -*-
import collective.setuphandlertools as sht
import logging
logger = logging.getLogger("plonetheme.zopeorg")
def setup_content(context):
if sht.isNotThisProfile(context, 'plonetheme.zopeorg_setup_content.txt'):
return
site = context.getSite()
sht.delete_items(site, ('front-page', 'news', 'events'), logger)
sht.hide_and_retract(site['Members'], logger)
content_structure = [
{'type': 'Image', 'title': 'old.zope.org Screenshot',
'id': u'Screenshotold.zope.org.png',
'opts': {'setExcludeFromNav': True},
'data': {'image': sht.load_file(globals(),
'setupdata/Screenshotold.zope.org.png')}},
{'type': 'Folder', 'title': u'Teasers',
'opts': {'workflow': None, # leave private
'setLocallyAllowedTypes': ['Teaser',],
'setImmediatelyAddableTypes':['Teaser',],
'setLayout': 'folder_summary_view'},
'childs': [
{'type': 'Teaser', 'title': 'The World of Zope',
'data': {'image': sht.load_file(globals(),
'setupdata/teaser_world-of-zope.jpg'),
'importance': u'3'}}]},
{'type': 'Collage', 'title': u'Start', 'id': 'front-page',
'data': { 'show_title': False, 'show_description': False, },
'childs': [
{'type': 'CollageRow', 'title': '', 'id': '1',
'childs': [
# the following column should hold a teaser portlet
{'type': 'CollageColumn', 'title': '', 'id': '1'}]},
{'type': 'CollageRow', 'title': '', 'id': '2',
'childs': [
{'type': 'CollageColumn', 'title': '', 'id': '1',
'childs': [
{'type': 'Document', 'title': u'Zope Community', 'id': u'front-community',
'opts': {'setExcludeFromNav': True},
'data': {'text': START_ZOPE_COMMUNITY_TEXT}}]},
{'type': 'CollageColumn', 'title': '', 'id': '2',
'childs': [
{'type': 'Document', 'title': u'Zope Foundation', 'id': u'front-foundation',
'opts': {'setExcludeFromNav': True},
'data': {'text': START_ZOPE_FOUNDATION_TEXT}}]},
{'type': 'CollageColumn', 'title': '', 'id': '3',
'childs': [
{'type': 'Document', 'title': u'Zope.org legacy', 'id': u'front-legacy',
'opts': {'setExcludeFromNav': True},
'data': {'text': START_ZOPEORG_LEGACY_TEXT}}]},
]},
]},
{'type': 'Document', 'title': u'The World of Zope', 'id': 'the-world-of-zope',
'data': {'text': THE_WORLD_OF_ZOPE_TEXT}},
{'type': 'Document', 'title': u'News & Events', 'id': u'news-events',
'data': {'text': NEWS_EVENTS_TEXT}},
{'type': 'Document', 'title': u'Community', 'id': u'community',
'data': {'text': COMMUNITY_TEXT}},
{'type': 'Document', 'title': u'Resources', 'id': u'resources',
'data': {'text': RESOURCES_TEXT}},
{'type': 'Document', 'title': u'Zope Foundation', 'id': u'foundation',
'data': {'text': ZOPE_FOUNDATION_TEXT}},
{'type': 'Document', 'title': u'Legal', 'id': u'legal',
'opts': {'setExcludeFromNav': True},
'data': {'description':u"""Zope.org Legal Notice.""",
'text': LEGAL_TEXT}},
]
sht.create_item_runner(site, content_structure, logger=logger)
    # The Collage column will hold a portlet, so the view must be 'portlets-top'.
from Products.Collage.interfaces import IDynamicViewManager
manager = IDynamicViewManager(site['front-page']['1']['1'])
manager.setLayout('portlets-top')
    # Set the internal link reference in the teaser.
site['teasers']['the-world-of-zope'].setLink_internal(site['the-world-of-zope'])
site['teasers']['the-world-of-zope'].reindexObject()
START_ZOPE_COMMUNITY_TEXT = u"""
<p>The Zope community is one of the largest and most professional open-source communities worldwide.</p>
<p><a class="internal-link" href="../community">Learn more...</a></p>
"""
START_ZOPE_FOUNDATION_TEXT = u"""
<p>The Zope Foundation has the goal to promote, maintain, and develop the Zope platform.</p>
<p><a class="internal-link" href="../foundation">Learn more...</a></p>
"""
START_ZOPEORG_LEGACY_TEXT = u"""
<p><a href="http://old.zope.org/" style="padding-left: 0px; "><img alt="old.zope.org" class="image-right" src="Screenshotold.zope.org.png"></a>Looking for the ancient Zope website?</p>
<p>Visit <a class="external-link" href="http://old.zope.org/">old.zope.org</a></p>
"""
THE_WORLD_OF_ZOPE_TEXT = u"""
<p>For more than a decade, Zope Corp. and the Zope Community have grown an outstanding set of products and technologies, influencing the general development of Python-based Web application servers and tools.</p>
<h2 class="accordion">Frameworks</h2>
<p><strong>ZCA<br /></strong>The Zope Component Architecture provides facilities for defining, registering and looking up components. It's perfect for building enterprise applications based on loosely coupled components.</p>
<p>More information at <a class="external-link" href="http://wiki.zope.org/zope3/ComponentArchitectureOverview">Component Architecture Overview</a>, <a class="external-link" href="http://docs.zope.org/zope.component/">zope.component documentation</a> and <a class="external-link" href="http://docs.zope.org/zope.interface/">zope.interface documentation</a></p>
<p><strong>ZTK<br /></strong>The Zope Toolkit (ZTK) is a set of libraries intended for reuse by projects to develop web applications or web frameworks. The ZCA is part of it.</p>
<p>More information at the <a class="external-link" href="http://docs.zope.org/zopetoolkit/">Zopetoolkit documentation</a></p>
<p><strong>ZPT<br /></strong>Zope Page Templates is Zope's templating mechanism.</p>
<p>More information at the <a class="external-link" href="http://docs.zope.org/zope2/zope2book/AppendixC.html">ZPT documentation in the Zope2 Book</a>. An alternative implementation is provided by <a class="external-link" href="http://chameleon.repoze.org/">Chameleon</a>.</p>
<p><strong>CMF</strong><br />The Content Management Framework (CMF) for Zope provides a powerful, tailorable platform for building content management applications together with the Zope Application Server.</p>
<p>More information at the <a class="external-link" href="http://old.zope.org/Products/CMF/">CMF Product Page</a></p>
<p><strong>Repoze<br /></strong>Repoze integrates Zope technologies with WSGI and reusable Python middleware.</p>
<p>More information at <a class="external-link" href="http://repoze.org">repoze.org</a></p>
<h2 class="accordion">Databases</h2>
<p><strong>ZODB</strong><br />The Zope Object DataBase (ZODB) is a native object database that stores your objects while allowing you to work with any paradigms that can be expressed in Python.</p>
<p>More information at <a class="external-link" href="http://zodb.org">zodb.org</a></p>
<h2 class="accordion">Application Servers</h2>
<p><strong>Zope<br /></strong>Zope is a Python-based application server for building secure and highly scalable web applications.</p>
<p>More information at <a class="external-link" href="http://zope2.zope.org">zope2.zope.org</a></p>
<p><strong>BlueBream</strong><br />BlueBream – formerly known as Zope 3 – is a web framework written in the Python programming language.</p>
<p>More information at <a class="external-link" href="http://bluebream.zope.org">bluebream.zope.org</a></p>
<p><strong>Grok<br /></strong>Grok is a web application framework for Python developers.</p>
<p>More information at <a class="external-link" href="http://grok.zope.org">grok.zope.org</a></p>
<h2 class="accordion">Tools</h2>
<p><strong>Buildout</strong><br />Buildout is a Python-based build system for creating, assembling and deploying applications from multiple parts, some of which may be non-Python-based.</p>
<p>More information at <a class="external-link" href="http://www.buildout.org">Buildout.org</a></p>
<h2 class="accordion">Zope based Software</h2>
<p><strong>Plone</strong><br />
Plone is a user friendly Content Management System running on top of Python, Zope and the CMF. It's a perfect fit in collaborative, enterprise environments but also for small sites.</p>
<p>More information at <a class="external-link" href="http://www.plone.org">Plone.org</a></p>
<p><strong>Pyramid</strong><br />Pyramid is a small, fast, down-to-earth, open source Python web application development framework. It makes real-world web application development and deployment more fun, more predictable, and more productive.</p>
<p>More information at <a class="external-link" href="http://www.pylonsproject.org/projects/pyramid/about">Pyramid</a></p>
<p><strong>Silva</strong><br /><span>Silva is a powerful CMS for organizations that manage multiple or complex websites.</span></p>
<p>More information at <a class="external-link" href="http://infrae.com/products/silva">Silva</a></p>
<p><strong>ERP5</strong><br /><span>ERP5 is a full-featured Open Source ERP/CRM application framework, based on a unified model to describe its implementation.</span></p>
<p>More information at <a class="external-link" href="http://www.erp5.com/">ERP5.com</a></p>
"""
## ... they use only a small part of Zope; we should not take credit for others' work.
#<p><strong>Twisted</strong><br />An asynchronous, extensible networking framework, with special focus on event-based programming and multiprotocol integration.</p>
#<p>More information at <a class="external-link" href="http://twistedmatrix.com/">twistedmatrix.com</a></p>
#<p><strong>Mailman 3</strong><br />Mailman is free software for managing electronic mail discussion and e-newsletter lists.</p>
#<p>More information at <a class="external-link" href="https://launchpad.net/mailman">Mailman</a></p>
NEWS_EVENTS_TEXT = u"""
<p>Find interesting news and events listed at <a class="external-link" href="http://calendar.zope.org">calendar.zope.org</a>.</p>
<p>Additional information is available from the major RSS feeds</p>
<ul>
<li><a class="external-link" href="http://planet.plone.org">Planet Plone</a></li>
<li><a class="external-link" href="http://planet.python.org">Planet Python</a></li>
</ul>
"""
# Planet Zope doesn't exist any more
# <li><a class="external-link" href="http://planetzope.org">Planet Zope</a></li>
COMMUNITY_TEXT = u"""
<p>The Zope community is one of the largest and most professional open-source communities worldwide.</p>
<h2>Mailing Lists</h2>
<p>Main Zope related mailing list collection is available at <a class="external-link" href="https://mail.zope.org/mailman/listinfo">mail.zope.org/mailman/listinfo</a></p>
<h2>IRC</h2>
<p style="padding-left: 0px; ">freenode.net hosts lots of Zope and Zope products/application related IRC channels. Visit <a class="external-link" href="http://irc.freenode.net">irc.freenode.net</a> and try one of the following channels: #zope, #zope.de, #zope3-dev, #plone, #bluebream, #grok</p>
<h2 style="padding-left: 0px; ">Websites</h2>
<p style="padding-left: 0px; ">Localized Zope related websites, e.g. <a class="external-link" href="http://www.zope.de">www.zope.de<br /></a>Audience/Tool/Product targeted websites, e.g. <a class="external-link" href="http://zope2.zope.org">zope2.zope.org</a>, <a class="external-link" href="http://bluebream.zope.org">bluebream.zope.org</a>, <a class="external-link" href="http://grok.zope.org">grok.zope.org</a>, <a class="external-link" href="http://docs.zope.org">docs.zope.org</a>, <a class="external-link" href="http://buildout.zope.org">buildout.zope.org</a></p>
<h2 style="padding-left: 0px; ">Planets</h2>
<p style="padding-left: 0px; ">News collections from different Zope related blogs, like <a class="external-link" href="http://planet.plone.org">Planet Plone</a> and <a class="external-link" href="http://planet.python.org">Planet Python</a>.</p>
"""
RESOURCES_TEXT = u"""
<h2>Code Repositories</h2>
<p>The Zope public Subversion repository provides read-only and selective write access to the source code of Zope and related projects: <a class="external-link" href="http://svn.zope.org">svn.zope.org</a></p>
<h2>PyPI</h2>
<p>Zope projects @ Python Package Index: <a class="external-link" href="http://pypi.python.org/pypi?:action=browse&amp;show=all&amp;c=514">Zope2 related projects</a>, <a class="external-link" href="http://pypi.python.org/pypi?:action=browse&amp;show=all&amp;c=515">Zope3 related projects</a></p>
<h2>Bug tracking</h2>
<p>Launchpad is an open source suite of tools that help people and teams to work together on software projects. Launchpad itself is built with Zope 3. Look at the <a class="external-link" href="https://launchpad.net/zope/">Zope project hub @ Launchpad</a></p>
<h2>Documentation</h2>
<p>The hub website to Zope community documentation projects is at <a class="external-link" href="http://docs.zope.org/">docs.zope.org</a></p>
<h2>Wiki</h2>
<p>Community maintained documentation, scratchpad and further information. Hub site to Zope community wiki documentation: <a class="external-link" href="http://wiki.zope.org/">wiki.zope.org</a></p>
<h2>Books</h2>
<p>Get Books about Zope for online and offline reading.</p>
<p>Books on Zope, Plone, Grok, Zope3, Bluebream, Repoze, Zope Component Architecture</p>
<h2>Archive</h2>
<p>Looking for the ancient Zope website? Visit <a class="external-link" href="http://old.zope.org">http://old.zope.org</a>.</p>
"""
ZOPE_FOUNDATION_TEXT = u"""
<p>The Zope Foundation has the goal to promote, maintain, and develop the Zope platform. It does this by supporting the Zope community.</p>
<p>Our community includes the open source community of contributors to the Zope software, contributors to the documentation and web infrastructure, as well as the community of businesses and organizations that use Zope. The Zope Foundation is the copyright holder of the Zope software and many extensions and associated software. The Zope Foundation also manages the zope.org website, and manages the infrastructure for open source collaboration.</p>
<p>For more information, visit <a class="external-link" href="http://foundation.zope.org">foundation.zope.org</a>.</p>
<h2>Contacting the Zope Foundation</h2>
<p style="padding-left: 30px; ">
<strong>Zope Foundation</strong><br />
Email: <a class="mail-link" href="mailto:<EMAIL>"><EMAIL></a><br />
Fax: +1 (703) 842-8076<br />
</p>
"""
LEGAL_TEXT = u"""
<p>All materials found on this web site are the property of Zope Foundation
and all rights are reserved. The information contained in and on the various pages
of the Zope.org web site have been issued for general distribution under
the protection of United States copyright laws. In addition to US copyright laws,
the information presented on Zope.org web site is protected under the
Berne Convention for the Protection of Literature and Artistic works, as well as
under other international conventions and under national laws on copyright and
neighboring rights.</p>
<p>Extracts of the information in the web site may be reviewed, reproduced or
translated for research or private study but not for sale or for use in conjunction
with commercial purposes.
m.c1566 = Constraint(expr= - m.b250 + m.b726 <= 0)
m.c1567 = Constraint(expr= - m.b251 + m.b727 <= 0)
m.c1568 = Constraint(expr= - m.b252 + m.b728 <= 0)
m.c1569 = Constraint(expr= - m.b253 + m.b729 <= 0)
m.c1570 = Constraint(expr= - m.b254 + m.b730 <= 0)
m.c1571 = Constraint(expr= - m.b255 + m.b731 <= 0)
m.c1572 = Constraint(expr= - m.b256 + m.b732 <= 0)
m.c1573 = Constraint(expr= - m.b257 + m.b733 <= 0)
m.c1574 = Constraint(expr= - m.b258 + m.b734 <= 0)
m.c1575 = Constraint(expr= - m.b259 + m.b735 <= 0)
m.c1576 = Constraint(expr= - m.b260 + m.b736 <= 0)
m.c1577 = Constraint(expr= - m.b261 + m.b737 <= 0)
m.c1578 = Constraint(expr= - m.b262 + m.b738 <= 0)
m.c1579 = Constraint(expr= - m.b263 + m.b739 <= 0)
m.c1580 = Constraint(expr= - m.b264 + m.b740 <= 0)
m.c1581 = Constraint(expr= - m.b265 + m.b741 <= 0)
m.c1582 = Constraint(expr= m.b742 <= 0)
m.c1583 = Constraint(expr= m.b743 <= 0)
m.c1584 = Constraint(expr= m.b744 <= 0)
m.c1585 = Constraint(expr= m.b745 <= 0)
m.c1586 = Constraint(expr= - m.b247 + m.b722 <= 0)
m.c1587 = Constraint(expr= - m.b248 + m.b723 <= 0)
m.c1588 = Constraint(expr= - m.b249 + m.b724 <= 0)
m.c1589 = Constraint(expr= - m.b250 + m.b725 <= 0)
m.c1590 = Constraint(expr= - m.b251 + m.b726 <= 0)
m.c1591 = Constraint(expr= - m.b252 + m.b727 <= 0)
m.c1592 = Constraint(expr= - m.b253 + m.b728 <= 0)
m.c1593 = Constraint(expr= - m.b254 + m.b729 <= 0)
m.c1594 = Constraint(expr= - m.b255 + m.b730 <= 0)
m.c1595 = Constraint(expr= - m.b256 + m.b731 <= 0)
m.c1596 = Constraint(expr= - m.b257 + m.b732 <= 0)
m.c1597 = Constraint(expr= - m.b258 + m.b733 <= 0)
m.c1598 = Constraint(expr= - m.b259 + m.b734 <= 0)
m.c1599 = Constraint(expr= - m.b260 + m.b735 <= 0)
m.c1600 = Constraint(expr= - m.b261 + m.b736 <= 0)
m.c1601 = Constraint(expr= - m.b262 + m.b737 <= 0)
m.c1602 = Constraint(expr= - m.b263 + m.b738 <= 0)
m.c1603 = Constraint(expr= - m.b264 + m.b739 <= 0)
m.c1604 = Constraint(expr= - m.b265 + m.b740 <= 0)
m.c1605 = Constraint(expr= m.b741 <= 0)
m.c1606 = Constraint(expr= m.b742 <= 0)
m.c1607 = Constraint(expr= m.b743 <= 0)
m.c1608 = Constraint(expr= m.b744 <= 0)
m.c1609 = Constraint(expr= m.b745 <= 0)
m.c1610 = Constraint(expr= - m.b248 + m.b722 <= 0)
m.c1611 = Constraint(expr= - m.b249 + m.b723 <= 0)
m.c1612 = Constraint(expr= - m.b250 + m.b724 <= 0)
m.c1613 = Constraint(expr= - m.b251 + m.b725 <= 0)
m.c1614 = Constraint(expr= - m.b252 + m.b726 <= 0)
m.c1615 = Constraint(expr= - m.b253 + m.b727 <= 0)
m.c1616 = Constraint(expr= - m.b254 + m.b728 <= 0)
m.c1617 = Constraint(expr= - m.b255 + m.b729 <= 0)
m.c1618 = Constraint(expr= - m.b256 + m.b730 <= 0)
m.c1619 = Constraint(expr= - m.b257 + m.b731 <= 0)
m.c1620 = Constraint(expr= - m.b258 + m.b732 <= 0)
m.c1621 = Constraint(expr= - m.b259 + m.b733 <= 0)
m.c1622 = Constraint(expr= - m.b260 + m.b734 <= 0)
m.c1623 = Constraint(expr= - m.b261 + m.b735 <= 0)
m.c1624 = Constraint(expr= - m.b262 + m.b736 <= 0)
m.c1625 = Constraint(expr= - m.b263 + m.b737 <= 0)
m.c1626 = Constraint(expr= - m.b264 + m.b738 <= 0)
m.c1627 = Constraint(expr= - m.b265 + m.b739 <= 0)
m.c1628 = Constraint(expr= m.b740 <= 0)
m.c1629 = Constraint(expr= m.b741 <= 0)
m.c1630 = Constraint(expr= m.b742 <= 0)
m.c1631 = Constraint(expr= m.b743 <= 0)
m.c1632 = Constraint(expr= m.b744 <= 0)
m.c1633 = Constraint(expr= m.b745 <= 0)
m.c1634 = Constraint(expr= - m.b249 + m.b722 <= 0)
m.c1635 = Constraint(expr= - m.b250 + m.b723 <= 0)
m.c1636 = Constraint(expr= - m.b251 + m.b724 <= 0)
m.c1637 = Constraint(expr= - m.b252 + m.b725 <= 0)
m.c1638 = Constraint(expr= - m.b253 + m.b726 <= 0)
m.c1639 = Constraint(expr= - m.b254 + m.b727 <= 0)
m.c1640 = Constraint(expr= - m.b255 + m.b728 <= 0)
m.c1641 = Constraint(expr= - m.b256 + m.b729 <= 0)
m.c1642 = Constraint(expr= - m.b257 + m.b730 <= 0)
m.c1643 = Constraint(expr= - m.b258 + m.b731 <= 0)
m.c1644 = Constraint(expr= - m.b259 + m.b732 <= 0)
m.c1645 = Constraint(expr= - m.b260 + m.b733 <= 0)
m.c1646 = Constraint(expr= - m.b261 + m.b734 <= 0)
m.c1647 = Constraint(expr= - m.b262 + m.b735 <= 0)
m.c1648 = Constraint(expr= - m.b263 + m.b736 <= 0)
m.c1649 = Constraint(expr= - m.b264 + m.b737 <= 0)
m.c1650 = Constraint(expr= - m.b265 + m.b738 <= 0)
m.c1651 = Constraint(expr= m.b739 <= 0)
m.c1652 = Constraint(expr= m.b740 <= 0)
m.c1653 = Constraint(expr= m.b741 <= 0)
m.c1654 = Constraint(expr= m.b742 <= 0)
m.c1655 = Constraint(expr= m.b743 <= 0)
m.c1656 = Constraint(expr= m.b744 <= 0)
m.c1657 = Constraint(expr= m.b745 <= 0)
m.c1658 = Constraint(expr= - m.b250 + m.b722 <= 0)
m.c1659 = Constraint(expr= - m.b251 + m.b723 <= 0)
m.c1660 = Constraint(expr= - m.b252 + m.b724 <= 0)
m.c1661 = Constraint(expr= - m.b253 + m.b725 <= 0)
m.c1662 = Constraint(expr= - m.b254 + m.b726 <= 0)
m.c1663 = Constraint(expr= - m.b255 + m.b727 <= 0)
m.c1664 = Constraint(expr= - m.b256 + m.b728 <= 0)
m.c1665 = Constraint(expr= - m.b257 + m.b729 <= 0)
m.c1666 = Constraint(expr= - m.b258 + m.b730 <= 0)
m.c1667 = Constraint(expr= - m.b259 + m.b731 <= 0)
m.c1668 = Constraint(expr= - m.b260 + m.b732 <= 0)
m.c1669 = Constraint(expr= - m.b261 + m.b733 <= 0)
m.c1670 = Constraint(expr= - m.b262 + m.b734 <= 0)
m.c1671 = Constraint(expr= - m.b263 + m.b735 <= 0)
m.c1672 = Constraint(expr= - m.b264 + m.b736 <= 0)
m.c1673 = Constraint(expr= - m.b265 + m.b737 <= 0)
m.c1674 = Constraint(expr= m.b738 <= 0)
m.c1675 = Constraint(expr= m.b739 <= 0)
m.c1676 = Constraint(expr= m.b740 <= 0)
m.c1677 = Constraint(expr= m.b741 <= 0)
m.c1678 = Constraint(expr= m.b742 <= 0)
m.c1679 = Constraint(expr= m.b743 <= 0)
m.c1680 = Constraint(expr= m.b744 <= 0)
m.c1681 = Constraint(expr= m.b745 <= 0)
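# Note: the constraint blocks above and below follow a regular pattern.  Each
# line of the form "m.cN = Constraint(expr= - m.bX + m.bY <= 0)" simply
# enforces m.bY <= m.bX (the presumably binary m.bY may only be 1 when m.bX
# is 1), while the bare "m.cN = Constraint(expr= m.bZ <= 0)" lines pin the
# remaining variables to 0.  A hedged sketch of how one such block could be
# generated programmatically (illustrative helper, not the generator actually
# used for this model):
#
#     from pyomo.environ import Constraint
#
#     def add_implication_block(m, first_lhs, first_rhs, count, cid_start):
#         """Add m.bY <= m.bX for Y = first_rhs + k, X = first_lhs + k."""
#         for k in range(count):
#             lhs = getattr(m, 'b%d' % (first_lhs + k))
#             rhs = getattr(m, 'b%d' % (first_rhs + k))
#             setattr(m, 'c%d' % (cid_start + k),
#                     Constraint(expr=-lhs + rhs <= 0))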
m.c1682 = Constraint(expr= - m.b267 + m.b746 <= 0)
m.c1683 = Constraint(expr= - m.b268 + m.b747 <= 0)
m.c1684 = Constraint(expr= - m.b269 + m.b748 <= 0)
m.c1685 = Constraint(expr= - m.b270 + m.b749 <= 0)
m.c1686 = Constraint(expr= - m.b271 + m.b750 <= 0)
m.c1687 = Constraint(expr= - m.b272 + m.b751 <= 0)
m.c1688 = Constraint(expr= - m.b273 + m.b752 <= 0)
m.c1689 = Constraint(expr= - m.b274 + m.b753 <= 0)
m.c1690 = Constraint(expr= - m.b275 + m.b754 <= 0)
m.c1691 = Constraint(expr= - m.b276 + m.b755 <= 0)
m.c1692 = Constraint(expr= - m.b277 + m.b756 <= 0)
m.c1693 = Constraint(expr= - m.b278 + m.b757 <= 0)
m.c1694 = Constraint(expr= - m.b279 + m.b758 <= 0)
m.c1695 = Constraint(expr= - m.b280 + m.b759 <= 0)
m.c1696 = Constraint(expr= - m.b281 + m.b760 <= 0)
m.c1697 = Constraint(expr= - m.b282 + m.b761 <= 0)
m.c1698 = Constraint(expr= - m.b283 + m.b762 <= 0)
m.c1699 = Constraint(expr= - m.b284 + m.b763 <= 0)
m.c1700 = Constraint(expr= - m.b285 + m.b764 <= 0)
m.c1701 = Constraint(expr= - m.b286 + m.b765 <= 0)
m.c1702 = Constraint(expr= - m.b287 + m.b766 <= 0)
m.c1703 = Constraint(expr= - m.b288 + m.b767 <= 0)
m.c1704 = Constraint(expr= - m.b289 + m.b768 <= 0)
m.c1705 = Constraint(expr= m.b769 <= 0)
m.c1706 = Constraint(expr= - m.b268 + m.b746 <= 0)
m.c1707 = Constraint(expr= - m.b269 + m.b747 <= 0)
m.c1708 = Constraint(expr= - m.b270 + m.b748 <= 0)
m.c1709 = Constraint(expr= - m.b271 + m.b749 <= 0)
m.c1710 = Constraint(expr= - m.b272 + m.b750 <= 0)
m.c1711 = Constraint(expr= - m.b273 + m.b751 <= 0)
m.c1712 = Constraint(expr= - m.b274 + m.b752 <= 0)
m.c1713 = Constraint(expr= - m.b275 + m.b753 <= 0)
m.c1714 = Constraint(expr= - m.b276 + m.b754 <= 0)
m.c1715 = Constraint(expr= - m.b277 + m.b755 <= 0)
m.c1716 = Constraint(expr= - m.b278 + m.b756 <= 0)
m.c1717 = Constraint(expr= - m.b279 + m.b757 <= 0)
m.c1718 = Constraint(expr= - m.b280 + m.b758 <= 0)
m.c1719 = Constraint(expr= - m.b281 + m.b759 <= 0)
m.c1720 = Constraint(expr= - m.b282 + m.b760 <= 0)
m.c1721 = Constraint(expr= - m.b283 + m.b761 <= 0)
m.c1722 = Constraint(expr= - m.b284 + m.b762 <= 0)
m.c1723 = Constraint(expr= - m.b285 + m.b763 <= 0)
m.c1724 = Constraint(expr= - m.b286 + m.b764 <= 0)
m.c1725 = Constraint(expr= - m.b287 + m.b765 <= 0)
m.c1726 = Constraint(expr= - m.b288 + m.b766 <= 0)
m.c1727 = Constraint(expr= - m.b289 + m.b767 <= 0)
m.c1728 = Constraint(expr= m.b768 <= 0)
m.c1729 = Constraint(expr= m.b769 <= 0)
m.c1730 = Constraint(expr= - m.b269 + m.b746 <= 0)
m.c1731 = Constraint(expr= - m.b270 + m.b747 <= 0)
m.c1732 = Constraint(expr= - m.b271 + m.b748 <= 0)
m.c1733 = Constraint(expr= - m.b272 + m.b749 <= 0)
m.c1734 = Constraint(expr= - m.b273 + m.b750 <= 0)
m.c1735 = Constraint(expr= - m.b274 + m.b751 <= 0)
m.c1736 = Constraint(expr= - m.b275 + m.b752 <= 0)
m.c1737 = Constraint(expr= - m.b276 + m.b753 <= 0)
m.c1738 = Constraint(expr= - m.b277 + m.b754 <= 0)
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / ubin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / vbin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_arc.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
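# A small standalone restatement of the (r, u, v) binning used by the
# brute-force loop in the test above, for the full u in [0, 1] and v in
# [-1, 1] ranges with logarithmic r bins.  This is only a hedged sketch that
# mirrors the index arithmetic already written inline; it assumes numpy is
# imported as np at the top of this module, as it is for the tests.
def _bin_triangle_sketch(d1, d2, d3, ccw, min_sep, bin_size, nrbins,
                         ubin_size, nubins, vbin_size, nvbins):
    """Return (rindex, uindex, vindex) for a triangle with sides d1 >= d2 >= d3,
    or None if the triangle falls outside the binned range."""
    assert d1 >= d2 >= d3
    rindex = int(np.floor(np.log(d2 / min_sep) / bin_size))
    if rindex < 0 or rindex >= nrbins:
        return None
    u = d3 / d2                    # shape parameter, 0 <= u <= 1
    v = (d1 - d2) / d3             # 0 <= v <= 1 before applying orientation
    if not ccw:                    # orientation determines the sign of v
        v = -v
    uindex = int(np.floor(u / ubin_size))
    vindex = int(np.floor((v + 1) / vbin_size))
    if not (0 <= uindex < nubins and 0 <= vindex < 2 * nvbins):
        return None
    return rindex, uindex, vindex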
@timer
def test_direct_partial():
# Test the two ways to only use parts of a catalog:
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1a = treecorr.Catalog(x=x1, y=y1, first_row=28, last_row=84)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2a = treecorr.Catalog(x=x2, y=y2, first_row=48, last_row=99)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3a = treecorr.Catalog(x=x3, y=y3, first_row=22, last_row=67)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('ddda.ntri = ',ddda.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(27,84):
for j in range(47,99):
for k in range(21,67):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_321
assert d1 >= d2 >= d3
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
print('true_ntri = ',true_ntri_sum)
print('diff = ',ddda.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddda.ntri, true_ntri_sum)
# Now with real CrossCorrelation
ddda = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('132 = ',ddda.n1n3n2.ntri)
#print('true 132 = ',true_ntri_132)
#print('213 = ',ddda.n2n1n3.ntri)
#print('true 213 = ',true_ntri_213)
#print('231 = ',ddda.n2n3n1.ntri)
#print('true 231 = ',true_ntri_231)
    #print('312 = ',ddda.n3n1n2.ntri)
#print('true 312 = ',true_ntri_312)
#print('321 = ',ddda.n3n2n1.ntri)
#print('true 321 = ',true_ntri_321)
np.testing.assert_array_equal(ddda.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddda.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddda.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddda.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddda.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddda.n3n2n1.ntri, true_ntri_321)
# Now check that we get the same thing with all the points, but with w=0 for the ones
# we don't want.
w1 = np.zeros(ngal)
w1[27:84] = 1.
w2 = np.zeros(ngal)
w2[47:99] = 1.
w3 = np.zeros(ngal)
w3[21:67] = 1.
cat1b = treecorr.Catalog(x=x1, y=y1, w=w1)
cat2b = treecorr.Catalog(x=x2, y=y2, w=w2)
cat3b = treecorr.Catalog(x=x3, y=y3, w=w3)
dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.ntri = ',dddb.ntri)
#print('diff = ',dddb.ntri - true_ntri_sum)
np.testing.assert_array_equal(dddb.ntri, true_ntri_sum)
dddb = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.n1n2n3.ntri = ',dddb.n1n2n3.ntri)
#print('diff = ',dddb.n1n2n3.ntri - true_ntri)
np.testing.assert_array_equal(dddb.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(dddb.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(dddb.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(dddb.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(dddb.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(dddb.n3n2n1.ntri, true_ntri_321)
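# The tests above rely on is_ccw()/is_ccw_3d() helpers defined earlier in this
# module (not included in this excerpt).  Below is a hedged sketch of what they
# presumably compute, using _sketch-suffixed names to avoid clashing with the
# real helpers.  The 3D sign convention is chosen to match the inline check in
# the spherical test above, which negates v when the same expression is
# positive (i.e. "counter-clockwise" corresponds to a negative value there).
def _is_ccw_sketch(x1, y1, x2, y2, x3, y3):
    # Sign of the 2D cross product (p2-p1) x (p3-p1); positive means
    # counter-clockwise ordering of the three points.
    return (x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1) > 0

def _is_ccw_3d_sketch(x1, y1, z1, x2, y2, z2, x3, y3, z3):
    # Dot product of (p2-p1) x (p3-p1) with the position vector p1.
    cx = (y2 - y1) * (z3 - z1) - (y3 - y1) * (z2 - z1)
    cy = (z2 - z1) * (x3 - x1) - (z3 - z1) * (x2 - x1)
    cz = (x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)
    return cx * x1 + cy * y1 + cz * z1 < 0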
@timer
def test_direct_3d_auto():
# This is the same as test_direct_count_auto, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(312, s, (ngal,) )
y = rng.normal(728, s, (ngal,) )
z = rng.normal(-932, s, (ngal,) )
r = np.sqrt( x*x + y*y + z*z )
dec = np.arcsin(z/r)
ra = np.arctan2(y,x)
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2 + (z[i]-z[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw_3d(x[i],y[i],z[i],x[j],y[j],z[j],x[k],y[k],z[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw_3d(x[j],y[j],z[j],x[i],y[i],z[i],x[k],y[k],z[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw_3d(x[j],y[j],z[j],x[k],y[k],z[k],x[i],y[i],z[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw_3d(x[i],y[i],z[i],x[k],y[k],z[k],x[j],y[j],z[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw_3d(x[k],y[k],z[k],x[i],y[i],z[i],x[j],y[j],z[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw_3d(x[k],y[k],z[k],x[j],y[j],z[j],x[i],y[i],z[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
#print('true_ntri => ',true_ntri)
    #print('diff = ',ddd.ntri - true_ntri)
self.params.forward_only:
# TODO(laigd): use the actual accuracy op names of the model.
header_str += '\ttop_1_accuracy\ttop_5_accuracy'
log_fn(header_str)
assert len(step_train_times) == self.num_warmup_batches
# reset times to ignore warm up batch
step_train_times = []
loop_start_time = time.time()
if (summary_writer and (local_step + 1) % self.params.save_summaries_steps == 0):
fetch_summary = summary_op
else:
fetch_summary = None
collective_graph_key = 7 if (self.params.variable_update == 'collective_all_reduce') else 0
(summary_str, last_average_loss, _) = benchmark_one_step(
sess, graph_info.fetches, local_step,
self.batch_size * (self.num_workers
if self.single_session else 1), step_train_times,
self.trace_filename, self.params.partitioned_graph_file_prefix,
profiler, image_producer, self.params, fetch_summary,
benchmark_logger=self.benchmark_logger,
collective_graph_key=collective_graph_key,
track_mvav_op=graph_info.mvav_op)
local_step += 1
if summary_str is not None and is_chief:
sv.summary_computed(sess, summary_str)
self.do_extra_summaries(summary_writer = summary_writer, local_step = local_step, sess=sess, graph_info=graph_info)
if (self.my_params.num_steps_per_hdf5 > 0 and local_step % self.my_params.num_steps_per_hdf5 == 0 and local_step > 0 and is_chief):
self.save_hdf5_by_global_step(sess.run(graph_info.global_step))
if (self.params.save_model_steps and local_step % self.params.save_model_steps == 0 and local_step > 0 and is_chief):
sv.saver.save(sess, sv.save_path, sv.global_step)
if self.lr_boundaries is not None and local_step % 100 == 0 and local_step > 0 and is_chief:
cur_global_step = sess.run(graph_info.global_step)
for b in self.lr_boundaries:
if b > cur_global_step and b - cur_global_step < 100:
sv.saver.save(sess, sv.save_path, sv.global_step)
self.save_hdf5_by_global_step(cur_global_step)
break
if self.my_params.frequently_save_interval is not None and self.my_params.frequently_save_last_epochs is not None and local_step % self.my_params.frequently_save_interval == 0 and local_step > 0 and is_chief:
cur_global_step = sess.run(graph_info.global_step)
remain_steps = self.num_batches - cur_global_step
remain_epochs = remain_steps * self.batch_size / self.dataset.num_examples_per_epoch(self.subset)
if remain_epochs < self.my_params.frequently_save_last_epochs:
self.save_hdf5_by_global_step(cur_global_step)
loop_end_time = time.time()
# Waits for the global step to be done, regardless of done_fn.
if global_step_watcher:
while not global_step_watcher.done():
time.sleep(.25)
if not global_step_watcher:
elapsed_time = loop_end_time - loop_start_time
average_wall_time = elapsed_time / local_step if local_step > 0 else 0
images_per_sec = (self.num_workers * local_step * self.batch_size /
elapsed_time)
num_steps = local_step * self.num_workers
else:
# NOTE: Each worker independently increases the global step. So,
# num_steps will be the sum of the local_steps from each worker.
num_steps = global_step_watcher.num_steps()
elapsed_time = global_step_watcher.elapsed_time()
average_wall_time = (elapsed_time * self.num_workers / num_steps
if num_steps > 0 else 0)
images_per_sec = num_steps * self.batch_size / elapsed_time
if self.my_params.save_hdf5:
print('start saving the final hdf5 to ', self.my_params.save_hdf5)
self.save_weights_to_hdf5(self.my_params.save_hdf5)
if self.my_params.save_mvav:
self.save_moving_average_weights_to_hdf5(self.my_params.save_hdf5.replace('.hdf5', '_mvav.hdf5'),
moving_averages=self.variable_averages)
log_fn('-' * 64)
# TODO(laigd): rename 'images' to maybe 'inputs'.
log_fn('total images/sec: %.2f' % images_per_sec)
log_fn('-' * 64)
if image_producer is not None:
image_producer.done()
if is_chief:
if self.benchmark_logger:
self.benchmark_logger.log_metric(
'average_examples_per_sec', images_per_sec, global_step=num_steps)
# Save the model checkpoint.
if self.params.train_dir is not None and is_chief:
checkpoint_path = os.path.join(self.params.train_dir, 'model.ckpt')
if not gfile.Exists(self.params.train_dir):
gfile.MakeDirs(self.params.train_dir)
sv.saver.save(sess, checkpoint_path, graph_info.global_step)
if graph_info.execution_barrier:
# Wait for other workers to reach the end, so this worker doesn't
# go away underneath them.
sess.run([graph_info.execution_barrier])
sv.stop()
if profiler:
generate_tfprof_profile(profiler, self.params.tfprof_file)
stats = {
'num_workers': self.num_workers,
'num_steps': num_steps,
'average_wall_time': average_wall_time,
'images_per_sec': images_per_sec
}
if last_average_loss is not None:
stats['last_average_loss'] = last_average_loss
return stats
def save_hdf5_by_global_step(self, cur_global_step):
save_path = os.path.join(self.params.train_dir, 'ckpt_step_{}.hdf5'.format(cur_global_step))
print('saving hdf5 to ', save_path)
self.save_weights_to_hdf5(hdf5_file=save_path)
if self.my_params.save_mvav:
self.save_moving_average_weights_to_hdf5(save_path.replace('.hdf5', '_mvav.hdf5'),
moving_averages=self.variable_averages)
def add_forward_pass_and_gradients(self,
phase_train,
rel_device_num,
abs_device_num,
input_processing_info,
gpu_compute_stage_ops,
gpu_grad_stage_ops):
"""Add ops for forward-pass and gradient computations."""
nclass = self.dataset.num_classes
if self.datasets_use_prefetch:
function_buffering_resource = None
if input_processing_info.function_buffering_resources:
function_buffering_resource = (
input_processing_info.function_buffering_resources[rel_device_num])
input_data = None
if input_processing_info.multi_device_iterator_input:
input_data = (
input_processing_info.multi_device_iterator_input[rel_device_num])
# Exactly one of function_buffering_resource or input_data is not None.
if function_buffering_resource is None and input_data is None:
raise ValueError('Both function_buffering_resource and input_data '
'cannot be null if datasets_use_prefetch=True')
if function_buffering_resource is not None and input_data is not None:
raise ValueError('Both function_buffering_resource and input_data '
'cannot be specified. Only one should be.')
with tf.device(self.raw_devices[rel_device_num]):
if function_buffering_resource is not None:
input_list = prefetching_ops.function_buffering_resource_get_next(
function_buffering_resource,
output_types=self.model.get_input_data_types())
else:
input_list = input_data
else:
if not self.dataset.use_synthetic_gpu_inputs():
input_producer_stage = input_processing_info.input_producer_stages[
rel_device_num]
with tf.device(self.cpu_device):
host_input_list = input_producer_stage.get()
with tf.device(self.raw_devices[rel_device_num]):
gpu_compute_stage = data_flow_ops.StagingArea(
[inp.dtype for inp in host_input_list],
shapes=[inp.get_shape() for inp in host_input_list])
# The CPU-to-GPU copy is triggered here.
gpu_compute_stage_op = gpu_compute_stage.put(host_input_list)
input_list = gpu_compute_stage.get()
gpu_compute_stage_ops.append(gpu_compute_stage_op)
else:
with tf.device(self.raw_devices[rel_device_num]):
# Minor hack to avoid H2D copy when using synthetic data
input_list = self.model.get_synthetic_inputs(
BenchmarkCNN.GPU_CACHED_INPUT_VARIABLE_NAME, nclass)
with tf.device(self.devices[rel_device_num]):
input_shapes = self.model.get_input_shapes()
input_list = [
tf.reshape(input_list[i], shape=input_shapes[i])
for i in range(len(input_list))
]
def forward_pass_and_gradients():
"""Builds forward pass and gradient computation network.
When phase_train=True and print_training_accuracy=False:
return [loss] + grads
When phase_train=True and print_training_accuracy=True:
return [logits, loss] + grads
When phase_train=False,
return [logits]
Its output can always be unpacked by
```
outputs = forward_pass_and_gradients()
logits, loss, grads = unpack_forward_pass_and_gradients_output(outputs)
```
Returns:
outputs: A list of tensors depending on different modes.
"""
self.convnet_builder = self.get_convnet_builder(input_list=input_list, phase_train=phase_train)
if self.my_params.need_record_internal_outputs:
self.convnet_builder.enable_record_internal_outputs()
build_network_result = self.model.build_network(
self.convnet_builder, nclass)
logits = build_network_result.logits
base_loss = self.model.loss_function(input_list, build_network_result)
self.postprocess_after_build_by_convnet_builder(self.convnet_builder, build_network_result)
if not phase_train:
assert self.num_gpus == 1
eval_fetches = [logits, base_loss]
return eval_fetches
params = self.variable_mgr.trainable_variables_on_device(
rel_device_num, abs_device_num)
l2_loss = None
total_loss = base_loss
with tf.name_scope('l2_loss'):
if self.my_params.apply_l2_on_vector_params:
params_to_regularize = [p for p in params]
else:
params_to_regularize = [p for p in params if len(p.get_shape()) in [2, 4]]
print('add l2 loss on these params:', [p.name for p in params_to_regularize])
if self.model.data_type == tf.float16 and self.params.fp16_vars:
# fp16 reductions are very slow on GPUs, so cast to fp32 before
# calling tf.nn.l2_loss and tf.add_n.
# TODO(b/36217816): Once the bug is fixed, investigate if we should do
# this reduction in fp16.
params_to_regularize = (tf.cast(p, tf.float32) for p in params_to_regularize)
if rel_device_num == len(self.devices) - 1:
# We compute the L2 loss for only one device instead of all of them,
# because the L2 loss for each device is the same. To adjust for this,
# we multiply the L2 loss by the number of devices. We choose the
# last device because for some reason, on a Volta DGX1, the first four
# GPUs take slightly longer to complete a step than the last four.
# TODO(reedwm): Shard the L2 loss computations across GPUs.
custom_l2_loss = self.model.custom_l2_loss(params_to_regularize)
if custom_l2_loss is not None:
l2_loss = custom_l2_loss
print('use the custom l2 loss')
elif self.params.single_l2_loss_op:
# TODO(reedwm): If faster, create a fused op that does the L2 loss
# on multiple tensors, and use that instead of concatenating
# tensors.
reshaped_params = [tf.reshape(p, (-1,)) for p in params_to_regularize]
l2_loss = tf.nn.l2_loss(tf.concat(reshaped_params, axis=0))
else:
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params_to_regularize])
weight_decay = self.params.weight_decay
if (weight_decay is not None and weight_decay != 0. and
l2_loss is not None):
print('the l2 loss factor (weight decay) is ', weight_decay)
total_loss += len(self.devices) * weight_decay * l2_loss
aggmeth = tf.AggregationMethod.DEFAULT
scaled_loss = (total_loss if self.loss_scale is None
else total_loss * self.loss_scale)
grads = tf.gradients(scaled_loss, params, aggregation_method=aggmeth)
if self.loss_scale is not None:
# TODO(reedwm): If automatic loss scaling is not used, we could avoid
# these multiplications by directly modifying the learning rate instead.
# If this is done, care must be taken to ensure that this scaling method
# is correct, as some optimizers square gradients and do other
# operations which might not be compatible with modifying both the
# gradients and the learning rate.
grads = [
grad * tf.cast(1. / self.loss_scale, grad.dtype) for grad in grads
]
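      # In equation form: with loss L and loss scale s, the graph computes
      # grad(s * L) = s * grad(L); multiplying by 1/s above recovers grad(L)
      # while keeping the intermediate backprop values large enough to avoid
      # fp16 underflow.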
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
if self.params.horovod_device:
horovod_device = '/%s:0' % self.params.horovod_device
else:
horovod_device = ''
# All-reduce gradients using Horovod.
grads = [hvd.allreduce(grad, average=False, device_dense=horovod_device)
for grad in grads]
if self.params.staged_vars:
grad_dtypes = [grad.dtype for grad in grads]
grad_shapes = [grad.shape for grad in grads]
grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes)
grad_stage_op = grad_stage.put(grads)
# In general, this decouples the computation of the gradients and
# the updates of the weights.
# During the pipeline warm up, this runs enough training to produce
# the first set of gradients.
gpu_grad_stage_ops.append(grad_stage_op)
grads = grad_stage.get()
if self.params.loss_type_to_report == 'total_loss':
loss = total_loss
else:
loss = base_loss
if self.params.print_training_accuracy:
return [logits, loss] + grads
else:
return [loss] + grads
def unpack_forward_pass_and_gradients_output(forward_pass_and_grad_outputs):
"""Unpacks outputs from forward_pass_and_gradients.
Args:
forward_pass_and_grad_outputs: Output from forward_pass_and_gradients.
Returns:
logits: Unscaled probability distribution from forward pass.
If unavailable, None is returned.
loss: Loss function result from logits.
If unavailable, None is returned.
grads: Gradients for all trainable variables.
          If unavailable, None is returned.
# pvlib/atmosphere.py (from sjanzou/pvlib-python)
"""
The ``atmosphere`` module contains methods to calculate relative and
absolute airmass and to determine pressure from altitude or vice versa.
"""
from __future__ import division
from warnings import warn
import numpy as np
import pandas as pd
from pvlib._deprecation import deprecated
APPARENT_ZENITH_MODELS = ('simple', 'kasten1966', 'kastenyoung1989',
'gueymard1993', 'pickering2002')
TRUE_ZENITH_MODELS = ('youngirvine1967', 'young1994')
AIRMASS_MODELS = APPARENT_ZENITH_MODELS + TRUE_ZENITH_MODELS
def pres2alt(pressure):
'''
Determine altitude from site pressure.
Parameters
----------
pressure : numeric
Atmospheric pressure (Pascals)
Returns
-------
altitude : numeric
Altitude in meters above sea level
Notes
------
The following assumptions are made
============================ ================
Parameter Value
============================ ================
Base pressure 101325 Pa
Temperature at zero altitude 288.15 K
Gravitational acceleration 9.80665 m/s^2
Lapse rate -6.5E-3 K/m
Gas constant for air 287.053 J/(kgK)
Relative Humidity 0%
============================ ================
References
-----------
[1] "A Quick Derivation relating altitude to air pressure" from
Portland State Aerospace Society, Version 1.03, 12/22/2004.
'''
alt = 44331.5 - 4946.62 * pressure ** (0.190263)
return alt
def alt2pres(altitude):
'''
Determine site pressure from altitude.
Parameters
----------
altitude : numeric
Altitude in meters above sea level
Returns
-------
pressure : numeric
Atmospheric pressure (Pascals)
Notes
------
The following assumptions are made
============================ ================
Parameter Value
============================ ================
Base pressure 101325 Pa
Temperature at zero altitude 288.15 K
Gravitational acceleration 9.80665 m/s^2
Lapse rate -6.5E-3 K/m
Gas constant for air 287.053 J/(kgK)
Relative Humidity 0%
============================ ================
References
-----------
[1] "A Quick Derivation relating altitude to air pressure" from
Portland State Aerospace Society, Version 1.03, 12/22/2004.
'''
press = 100 * ((44331.514 - altitude) / 11880.516) ** (1 / 0.1902632)
return press
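# A quick, hedged sanity check of the two conversions above (values are
# approximate because pres2alt and alt2pres use independently rounded fit
# constants):
#
#     alt2pres(0.)               # ~101325 Pa (sea level)
#     alt2pres(1000.)            # ~89876 Pa
#     pres2alt(alt2pres(1000.))  # ~1000 m; the round trip agrees to ~1 m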
def get_absolute_airmass(airmass_relative, pressure=101325.):
'''
Determine absolute (pressure corrected) airmass from relative
airmass and pressure
Gives the airmass for locations not at sea-level (i.e. not at
standard pressure). The input argument "AMrelative" is the relative
airmass. The input argument "pressure" is the pressure (in Pascals)
at the location of interest and must be greater than 0. The
calculation for absolute airmass is
.. math::
absolute airmass = (relative airmass)*pressure/101325
Parameters
----------
airmass_relative : numeric
The airmass at sea-level.
pressure : numeric, default 101325
The site pressure in Pascal.
Returns
-------
airmass_absolute : numeric
Absolute (pressure corrected) airmass
References
----------
[1] <NAME>, "Critical analysis and performance assessment of
clear sky solar irradiance models using theoretical and measured
data," Solar Energy, vol. 51, pp. 121-138, 1993.
'''
airmass_absolute = airmass_relative * pressure / 101325.
return airmass_absolute
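# A hedged usage example: the correction is a simple rescaling by the pressure
# ratio, so at roughly 2000 m altitude (~79.5 kPa per alt2pres above) a
# sea-level relative airmass of 1.5 becomes roughly 1.5 * 79500 / 101325 ~ 1.18:
#
#     get_absolute_airmass(1.5, pressure=alt2pres(2000.))   # ~1.18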
absoluteairmass = deprecated('0.6', alternative='get_absolute_airmass',
name='absoluteairmass', removal='0.7')(
get_absolute_airmass)
def get_relative_airmass(zenith, model='kastenyoung1989'):
'''
Gives the relative (not pressure-corrected) airmass.
Gives the airmass at sea-level when given a sun zenith angle (in
degrees). The ``model`` variable allows selection of different
airmass models (described below). If ``model`` is not included or is
not valid, the default model is 'kastenyoung1989'.
Parameters
----------
zenith : numeric
Zenith angle of the sun in degrees. Note that some models use
the apparent (refraction corrected) zenith angle, and some
models use the true (not refraction-corrected) zenith angle. See
model descriptions to determine which type of zenith angle is
required. Apparent zenith angles must be calculated at sea level.
model : string, default 'kastenyoung1989'
Available models include the following:
* 'simple' - secant(apparent zenith angle) -
Note that this gives -inf at zenith=90
* 'kasten1966' - See reference [1] -
requires apparent sun zenith
* 'youngirvine1967' - See reference [2] -
requires true sun zenith
* 'kastenyoung1989' - See reference [3] -
requires apparent sun zenith
* 'gueymard1993' - See reference [4] -
requires apparent sun zenith
* 'young1994' - See reference [5] -
          requires true sun zenith
* 'pickering2002' - See reference [6] -
requires apparent sun zenith
Returns
-------
airmass_relative : numeric
Relative airmass at sea level. Will return NaN values for any
zenith angle greater than 90 degrees.
References
----------
[1] <NAME>. "A New Table and Approximation Formula for the
Relative Optical Air Mass". Technical Report 136, Hanover, N.H.:
U.S. Army Material Command, CRREL.
[2] <NAME> and <NAME>, "Multicolor Photoelectric
Photometry of the Brighter Planets," The Astronomical Journal, vol.
72, pp. 945-950, 1967.
[3] <NAME> and <NAME>. "Revised optical air mass tables
and approximation formula". Applied Optics 28:4735-4738
[4] <NAME>, "Critical analysis and performance assessment of
clear sky solar irradiance models using theoretical and measured
data," Solar Energy, vol. 51, pp. 121-138, 1993.
[5] <NAME>, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33,
pp. 1108-1110, Feb 1994.
[6] <NAME>. "The Ancient Star Catalog". DIO 12:1, 20,
[7] <NAME>, <NAME> and <NAME>, "Global
Horizontal Irradiance Clear Sky Models: Implementation and Analysis"
Sandia Report, (2012).
'''
# need to filter first because python 2.7 does not support raising a
# negative number to a negative power.
z = np.where(zenith > 90, np.nan, zenith)
zenith_rad = np.radians(z)
model = model.lower()
if 'kastenyoung1989' == model:
am = (1.0 / (np.cos(zenith_rad) +
0.50572*(((6.07995 + (90 - z)) ** - 1.6364))))
elif 'kasten1966' == model:
am = 1.0 / (np.cos(zenith_rad) + 0.15*((93.885 - z) ** - 1.253))
elif 'simple' == model:
am = 1.0 / np.cos(zenith_rad)
elif 'pickering2002' == model:
am = (1.0 / (np.sin(np.radians(90 - z +
244.0 / (165 + 47.0 * (90 - z) ** 1.1)))))
elif 'youngirvine1967' == model:
sec_zen = 1.0 / np.cos(zenith_rad)
am = sec_zen * (1 - 0.0012 * (sec_zen * sec_zen - 1))
elif 'young1994' == model:
am = ((1.002432*((np.cos(zenith_rad)) ** 2) +
0.148386*(np.cos(zenith_rad)) + 0.0096467) /
(np.cos(zenith_rad) ** 3 +
0.149864*(np.cos(zenith_rad) ** 2) +
0.0102963*(np.cos(zenith_rad)) + 0.000303978))
elif 'gueymard1993' == model:
am = (1.0 / (np.cos(zenith_rad) +
0.00176759*(z)*((94.37515 - z) ** - 1.21563)))
else:
        raise ValueError('%s is not a valid model for relativeairmass' % model)
if isinstance(zenith, pd.Series):
am = pd.Series(am, index=zenith.index)
return am
relativeairmass = deprecated('0.6', alternative='get_relative_airmass',
name='relativeairmass', removal='0.7')(
get_relative_airmass)
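# Usage sketch (illustrative only): every model should return a value close to
# 1 at zenith = 0, and zenith angles above 90 degrees come back as NaN, e.g.
#
#     >>> get_relative_airmass(0.)                       # approximately 1.0
#     >>> get_relative_airmass(80., model='kasten1966')  # roughly 5.6
#     >>> get_relative_airmass(95.)                      # nan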
def gueymard94_pw(temp_air, relative_humidity):
r"""
Calculates precipitable water (cm) from ambient air temperature (C)
    and relative humidity (%) using an empirical model. The
accuracy of this method is approximately 20% for moderate PW (1-3
cm) and less accurate otherwise.
The model was developed by expanding Eq. 1 in [2]_:
.. math::
w = 0.1 H_v \rho_v
using Eq. 2 in [2]_
.. math::
\rho_v = 216.7 R_H e_s /T
    :math:`H_v` is the apparent water vapor scale height (km). The
expression for :math:`H_v` is Eq. 4 in [2]_:
.. math::
H_v = 0.4976 + 1.5265*T/273.15 + \exp(13.6897*T/273.15 - 14.9188*(T/273.15)^3)
:math:`\rho_v` is the surface water vapor density (g/m^3). In the
expression :math:`\rho_v`, :math:`e_s` is the saturation water vapor
pressure (millibar). The
expression for :math:`e_s` is Eq. 1 in [3]_
.. math::
e_s = \exp(22.330 - 49.140*(100/T) - 10.922*(100/T)^2 - 0.39015*T/100)
Parameters
----------
temp_air : numeric
ambient air temperature at the surface (C)
relative_humidity : numeric
relative humidity at the surface (%)
Returns
-------
pw : numeric
precipitable water (cm)
References
----------
.. [1] <NAME> and <NAME>, Accurate Measurement, Using Natural
Sunlight, of Silicon Solar Cells, Prog. in Photovoltaics: Res.
and Appl. 2004, vol 12, pp. 1-19 (:doi:`10.1002/pip.517`)
.. [2] <NAME>, Analysis of Monthly Average Atmospheric Precipitable
Water and Turbidity in Canada and Northern United States,
Solar Energy vol 53(1), pp. 57-71, 1994.
.. [3] <NAME>, Assessment of the Accuracy and Computing Speed of
simplified saturation vapor equations using a new reference
dataset, J. of Applied Meteorology 1993, vol. 32(7), pp.
1294-1300.
"""
T = temp_air + 273.15 # Convert to Kelvin # noqa: N806
RH = relative_humidity # noqa: N806
theta = T / 273.15
# Eq. 1 from Keogh and Blakers
pw = (
0.1 *
(0.4976 + 1.5265*theta + np.exp(13.6897*theta - 14.9188*(theta)**3)) *
(216.7*RH/(100*T)*np.exp(22.330 - 49.140*(100/T) -
10.922*(100/T)**2 - 0.39015*T/100)))
pw = np.maximum(pw, 0.1)
return pw
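# Usage sketch (illustrative only): at 20 C and 50% relative humidity the model
# gives roughly 1.9 cm of precipitable water, and the floor applied above keeps
# the return value from dropping below 0.1 cm, e.g.
#
#     >>> gueymard94_pw(20., 50.)   # roughly 1.9 (cm)
#     >>> gueymard94_pw(-40., 1.)   # clipped to 0.1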
def first_solar_spectral_correction(pw, airmass_absolute, module_type=None,
coefficients=None):
r"""
Spectral mismatch modifier based on precipitable water and absolute
(pressure corrected) airmass.
Estimates a spectral mismatch modifier M representing the effect on
module short circuit current of variation in the spectral
    irradiance. M is estimated from absolute (pressure corrected) air
mass, AMa, and precipitable water, Pwat, using the following
function:
.. math::
        M = c_1 + c_2*AMa +
<reponame>boshuda/comic-rack-scraper
'''
This module contains the ComicBook class, which represents a comic book
from ComicRack that we are scraping data into.
@author: <NAME>
'''
from dbmodels import IssueRef, SeriesRef
from pluginbookdata import PluginBookData
from time import strftime
from utils import sstr, is_number
import clr
import db
import fnameparser
import log
import re
import utils
from bookdata import BookData
clr.AddReference('System')
from System.IO import Path
from System.Security.Cryptography import MD5
from System.Text import Encoding
#==============================================================================
class ComicBook(object):
'''
This class is a wrapper for the ComicRack ComicBook object, which adds
additional, scraper-oriented functionality to the object, and provides
read-only access to some of its data.
'''
# This is the magic 'tag' string we use (in the "Tags" or "Notes" fields of
# this comic book) to denote that this comic should always be skipped
# automatically, instead of scraped. Despite the CV in the name,
# this is a database independent magic value.
CVDBSKIP = 'CVDBSKIP'
#===========================================================================
def __init__(self, crbook, scraper):
'''
Initializes this ComicBook object based on an underlying ComicRack
      ComicBook object (the crbook parameter) and the given ScrapeEngine.
'''
self.__scraper = scraper;
self.__bookdata = PluginBookData(crbook, scraper)
self.__parse_extra_details_from_path()
#===========================================================================
# Series name of this book. Not None, may be empty.
series_s = property( lambda self : self.__bookdata.series_s )
# Issue number (string) of this book. Not None, may be empty.
issue_num_s = property( lambda self : self.__bookdata.issue_num_s )
# Volume (start year) of this book as an int >= -1, where -1 is unknown.
volume_year_n = property( lambda self : self.__bookdata.volume_year_n )
# Publication year of this book, as an int >= -1, where -1 is unknown
pub_year_n = property( lambda self : self.__bookdata.pub_year_n )
# Release year of this book, as an int >= -1, where -1 is unknown
rel_year_n = property( lambda self : self.__bookdata.rel_year_n )
# The format of this book (giant, annual, etc.) Not None, may be empty.
format_s = property( lambda self : self.__bookdata.format_s )
# The underlying path for this book (full path, including extension),
# or "" if it is a fileless book. Will never be None.
path_s = property(lambda self : self.__bookdata.path_s )
# The number of pages in this book, an integer >= 0.
page_count_n = property( lambda self : self.__bookdata.page_count_n )
# the unique id string associated with this comic book's series. all comic
# books that appear to be from the same series will have the same id string,
# which will be different for each series. will not be null or None.
unique_series_s = property( lambda self : self.__unique_series_s() )
# an IssueRef object identifying this book in the database, if available.
# will be None if not available, which is always the case for books that
# haven't been scraped before.
issue_ref = property( lambda self : None if
self.__extract_issue_ref() == 'skip' else self.__extract_issue_ref() )
# a SeriesRef object identifying this book's series in the database, if
# available. will be None if not available, which is always the case for
# books that haven't been scraped before.
series_ref = property( lambda self : self.__extract_series_ref() )
   # true if this book has been marked to "skip forever" (the scraper should
   # silently skip this book if this value is true, regardless of self.issue_ref)
skip_b = property( lambda self : self.__extract_issue_ref() == 'skip' )
#==========================================================================
def create_image_of_page(self, page_index):
'''
      Retrieves a COPY of a single page (a .NET "Image" object) for this
ComicBook. Returns None if the requested page could not be obtained.
page_index --> the index of the page to retrieve; a value on the range
[0, n-1], where n is self.page_count_n.
'''
return self.__bookdata.create_image_of_page(page_index)
#===========================================================================
def skip_forever(self):
'''
This method causes this book to be marked with the magic CVDBSKIP
flag, which means that from now on, self.issue_ref will always be
"skip", which tells the scraper to automatically skip over this book
without even asking the user.
'''
# try to make everyone happy here: if notes and tags "rescrape saving"
# are both turned on, or both turned off, then this command should just
# write CVDBSKIP to both of them (users who turn off both still might
# want CVDBSKIP to work!) otherwise, use the values of these 2 prefs to
# determine which fields to write the CVDBSKIP to.
bd = self.__bookdata
notes = self.__scraper.config.rescrape_notes_b
tags = self.__scraper.config.rescrape_tags_b
if notes == tags or tags:
bd.tags_sl = self.__add_key_to_tags(bd.tags_sl, None);
log.debug("Added ", ComicBook.CVDBSKIP, " flag to comic book 'Tags'")
if notes == tags or notes:
bd.notes_s =self.__add_key_to_notes(bd.notes_s, None)
log.debug("Added ", ComicBook.CVDBSKIP, " flag to comic book 'Notes'")
bd.update()
# =============================================================================
def __extract_issue_ref(self):
'''
This method attempts to rebuild the IssueRef that the user chose the
last time that they scraped this comic. If it can do so, it will
return that IssueRef. If not, it will return None, or the
string "skip" (see below).
If the user has manually added the magic CVDBSKIP flag to the tags or
notes for this book, then this method will return the string "skip",
which should be interpreted as "never scrape this book".
'''
      # in this method, it's easier to work with tags as a single string
bd = self.__bookdata
tagstring = ', '.join(bd.tags_sl)
# check for the magic CVDBSKIP skip flag
skip_found = re.search(r'(?i)'+ComicBook.CVDBSKIP, tagstring)
if not skip_found:
skip_found = re.search(r'(?i)'+ComicBook.CVDBSKIP, bd.notes_s)
retval = "skip" if skip_found else None
if retval is None:
# if no skip tag, see if there's a key tag in the tags or notes
issue_key = db.parse_key_tag(tagstring)
if issue_key == None:
issue_key = db.parse_key_tag(bd.notes_s)
if issue_key == None:
issue_key = int(bd.issue_key_s) if \
utils.is_number(bd.issue_key_s) else None
if issue_key != None:
# found a key tag! convert to an IssueRef
retval = IssueRef(self.issue_num_s, issue_key,
self.__bookdata.title_s, self.__bookdata.cover_url_s);
return retval
# =============================================================================
def __extract_series_ref(self):
'''
This method attempts to rebuild the SeriesRef that the user chose the
last time that they scraped this comic. If it can do so, it will
return that SeriesRef, otherwise it will return None.
'''
      # in this method, it's easier to work with tags as a single string
bd = self.__bookdata
retval = None
series_key = int(bd.series_key_s) if \
utils.is_number(bd.series_key_s) else None
if series_key != None:
# found a key tag! convert to a sparse SeriesRef
retval = SeriesRef(series_key, None, -1, '', -1, None);
return retval
#==========================================================================
def __unique_series_s(self):
'''
Gets the unique series name for this ComicBook. This is a special
string that will be identical for (and only for) any comic books that
"appear" to be from the same series.
The unique series name is meant to be used internally (i.e. the key for
a map, or for grouping ComicBooks), not for displaying to users.
This value is NOT the same as the series_s property.
'''
bd = self.__bookdata
sname = '' if not bd.series_s else bd.series_s
if sname and bd.format_s:
sname += bd.format_s
      sname = re.sub(r'\W+', '', sname).lower()
svolume = ''
if sname:
if bd.volume_year_n and bd.volume_year_n > 0:
svolume = sstr(bd.volume_year_n)
else:
# if we can't find a name at all (very weird), fall back to the
         # memory ID, which is unique and thus ensures that this
         # comic doesn't get lumped into the same series choice as any
# other unnamed comics!
sname = "uniqueid-" + utils.sstr(id(self))
# generate a hash to add onto the string. the hash should be identical
# for all comics that belong to the same series, and different otherwise.
      # not how
<reponame>Jeansding/Malaya<filename>malaya/summarize.py<gh_stars>1-10
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter('ignore')
import numpy as np
import re
import random
from scipy.linalg import svd
from operator import itemgetter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.decomposition import NMF, LatentDirichletAllocation
from .texts._text_functions import (
summary_textcleaning,
classification_textcleaning,
STOPWORDS,
split_by_dot,
)
from .stem import sastrawi
from ._models import _skip_thought
from .cluster import cluster_words
class _DEEP_SUMMARIZER:
def __init__(
self, sess, x, logits, attention, dictionary, maxlen, model = None
):
self._sess = sess
self._X = x
self._logits = logits
self._attention = attention
self.dictionary = dictionary
self._maxlen = maxlen
self._rev_dictionary = {v: k for k, v in self.dictionary.items()}
self._model = model
def vectorize(self, corpus):
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
        corpus = re.findall(r'(?=\S)[^.\n]+(?<=\S)', corpus)
corpus = [summary_textcleaning(i) for i in corpus]
sequences = _skip_thought.batch_sequence(
corpus, self.dictionary, maxlen = self._maxlen
)
return self._sess.run(
self._logits, feed_dict = {self._X: np.array(sequences)}
)
def summarize(
self, corpus, top_k = 3, important_words = 3, return_cluster = True
):
"""
Summarize list of strings / corpus
Parameters
----------
corpus: str, list
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
Returns
-------
string: summarized string
"""
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
if not isinstance(return_cluster, bool):
raise ValueError('return_cluster must be a boolean')
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
        corpus = re.findall(r'(?=\S)[^.\n]+(?<=\S)', corpus)
corpus = [summary_textcleaning(i) for i in corpus]
sequences = _skip_thought.batch_sequence(
corpus, self.dictionary, maxlen = self._maxlen
)
encoded, attention = self._sess.run(
[self._logits, self._attention],
feed_dict = {self._X: np.array(sequences)},
)
attention = attention.sum(axis = 0)
kmeans = KMeans(n_clusters = top_k, random_state = 0)
kmeans = kmeans.fit(encoded)
avg = []
for j in range(top_k):
idx = np.where(kmeans.labels_ == j)[0]
avg.append(np.mean(idx))
closest, _ = pairwise_distances_argmin_min(
kmeans.cluster_centers_, encoded
)
indices = np.argsort(attention)[::-1]
top_words = [self._rev_dictionary[i] for i in indices[:important_words]]
ordering = sorted(range(top_k), key = lambda k: avg[k])
summarized = '. '.join([corpus[closest[idx]] for idx in ordering])
if return_cluster:
return {
'summary': summarized,
'top-words': top_words,
'cluster-top-words': cluster_words(top_words),
}
return {'summary': summarized, 'top-words': top_words}
def deep_model_news():
"""
Load skip-thought summarization deep learning model trained on news dataset.
Returns
-------
_DEEP_SUMMARIZER: _DEEP_SUMMARIZER class
"""
sess, x, logits, attention, dictionary, maxlen = (
_skip_thought.news_load_model()
)
return _DEEP_SUMMARIZER(sess, x, logits, attention, dictionary, maxlen)
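# Usage sketch (illustrative only; `sentences` is a hypothetical list of
# strings): the returned _DEEP_SUMMARIZER exposes vectorize() and summarize(),
# e.g.
#
#     >>> summarizer = deep_model_news()
#     >>> result = summarizer.summarize(sentences, top_k=3)
#     >>> result['summary'], result['top-words']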
def deep_model_wiki():
"""
Load residual network with Bahdanau Attention summarization deep learning model trained on wikipedia dataset.
Returns
-------
_DEEP_SUMMARIZER: _DEEP_SUMMARIZER class
"""
print(
'WARNING: this model is using convolutional based, Tensorflow-GPU above 1.10 may got a problem. Please downgrade to Tensorflow-GPU v1.8 if got any cuDNN error.'
)
sess, x, logits, attention, dictionary, maxlen = (
_skip_thought.wiki_load_model()
)
return _DEEP_SUMMARIZER(sess, x, logits, attention, dictionary, maxlen)
def train_skip_thought(
corpus,
epoch = 5,
batch_size = 16,
embedding_size = 256,
maxlen = 50,
vocab_size = None,
stride = 1,
):
"""
Train a deep skip-thought network for summarization agent
Parameters
----------
corpus: str, list
epoch: int, (default=5)
iteration numbers
    batch_size: int, (default=16)
batch size for every feed, batch size must <= size of corpus
embedding_size: int, (default=256)
vector size representation for a word
maxlen: int, (default=50)
        max length of a string to be trained on
vocab_size: int, (default=None)
max vocabulary size, None for no limit
stride: int, (default=1)
stride size, skipping value for sentences
Returns
-------
_DEEP_SUMMARIZER: malaya.skip_thought._DEEP_SUMMARIZER class
"""
if not isinstance(epoch, int):
raise ValueError('epoch must be an integer')
if not isinstance(batch_size, int):
raise ValueError('batch_size must be an integer')
if not isinstance(embedding_size, int):
raise ValueError('embedding_size must be an integer')
if not isinstance(maxlen, int):
raise ValueError('maxlen must be an integer')
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
    corpus = re.findall(r'(?=\S)[^.\n]+(?<=\S)', corpus)
corpus = [summary_textcleaning(i) for i in corpus]
t_range = int((len(corpus) - 3) / stride + 1)
left, middle, right = [], [], []
for i in range(t_range):
slices = corpus[i * stride : i * stride + 3]
left.append(slices[0])
middle.append(slices[1])
right.append(slices[2])
if batch_size > len(left):
        raise ValueError('batch size must be smaller than corpus size')
left, middle, right = shuffle(left, middle, right)
sess, model, dictionary, _ = _skip_thought.train_model(
middle,
left,
right,
epoch = epoch,
batch_size = batch_size,
embedding_size = embedding_size,
maxlen = maxlen,
vocab_size = vocab_size,
)
return _DEEP_SUMMARIZER(
sess,
model.INPUT,
model.get_thought,
model.attention,
dictionary,
maxlen,
model = model,
)
def lsa(
corpus,
maintain_original = False,
ngram = (1, 3),
min_df = 2,
top_k = 3,
important_words = 3,
return_cluster = True,
**kwargs
):
"""
summarize a list of strings using LSA.
Parameters
----------
corpus: list
maintain_original: bool, (default=False)
If False, will apply malaya.text_functions.classification_textcleaning
ngram: tuple, (default=(1,3))
n-grams size to train a corpus
min_df: int, (default=2)
minimum document frequency for a word
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
return_cluster: bool, (default=True)
if True, will cluster important_words to similar texts
Returns
-------
dictionary: result
"""
if not isinstance(maintain_original, bool):
raise ValueError('maintain_original must be a boolean')
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
if not isinstance(return_cluster, bool):
raise ValueError('return_cluster must be a boolean')
if not isinstance(ngram, tuple):
raise ValueError('ngram must be a tuple')
if not len(ngram) == 2:
raise ValueError('ngram size must equal to 2')
    if not isinstance(min_df, (int, float)):
raise ValueError('min_df must be an integer or a float')
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
    corpus = re.findall(r'(?=\S)[^.\n]+(?<=\S)', corpus)
splitted_fullstop = [summary_textcleaning(i) for i in corpus]
splitted_fullstop = [
classification_textcleaning(i) if not maintain_original else i
for i in splitted_fullstop
if len(i)
]
stemmed = [sastrawi(i) for i in splitted_fullstop]
tfidf = TfidfVectorizer(
ngram_range = ngram, min_df = min_df, stop_words = STOPWORDS, **kwargs
).fit(stemmed)
U, S, Vt = svd(tfidf.transform(stemmed).todense().T, full_matrices = False)
summary = [
(splitted_fullstop[i], np.linalg.norm(np.dot(np.diag(S), Vt[:, b]), 2))
for i in range(len(splitted_fullstop))
for b in range(len(Vt))
]
summary = sorted(summary, key = itemgetter(1))
summary = dict(
(v[0], v) for v in sorted(summary, key = lambda summary: summary[1])
).values()
summarized = '. '.join([a for a, b in summary][len(summary) - (top_k) :])
indices = np.argsort(tfidf.idf_)[::-1]
features = tfidf.get_feature_names()
top_words = [features[i] for i in indices[:important_words]]
if return_cluster:
return {
'summary': summarized,
'top-words': top_words,
'cluster-top-words': cluster_words(top_words),
}
return {'summary': summarized, 'top-words': top_words}
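# Usage sketch (illustrative only; `news_corpus` is a hypothetical list of
# strings): the LSA summarizer returns a plain dict, e.g.
#
#     >>> out = lsa(news_corpus, top_k=2, important_words=5)
#     >>> out['summary']
#     >>> out['cluster-top-words']   # present because return_cluster defaults to True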
def nmf(
corpus,
maintain_original = False,
ngram = (1, 3),
min_df = 2,
top_k = 3,
important_words = 3,
return_cluster = True,
**kwargs
):
"""
summarize a list of strings using NMF.
Parameters
----------
corpus: list
maintain_original: bool, (default=False)
If False, will apply malaya.text_functions.classification_textcleaning
ngram: tuple, (default=(1,3))
n-grams size to train a corpus
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
min_df: int, (default=2)
minimum document frequency for a word
return_cluster: bool, (default=True)
if True, will cluster important_words to similar texts
Returns
-------
dictionary: result
"""
if not isinstance(maintain_original, bool):
raise ValueError('maintain_original must be a boolean')
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
<reponame>SamPaskewitz/statsrat
import numpy as np
import pandas as pd
import xarray as xr
from scipy import stats
from plotnine import *
import nlopt
def multi_sim(model, trials_list, par_val, random_resp = False, sim_type = None):
"""
Simulate one or more trial sequences from the same schedule with known parameters.
Parameters
----------
model : object
Model to use.
trials_list : list
List of time step level experimental data (cues, outcomes
etc.) for each participant. These should be generated from
the same experimental schedule.
par_val : list
Learning model parameters (floats or ints).
random_resp : boolean
Should responses be random?
sim_type: str or None, optional
Type of simulation to perform (passed to the model's .simulate() method).
Should be a string indicating the type of simulation if there is more than
one type (e.g. latent cause models), and otherwise should be None.
Defaults to None.
Returns
-------
ds : dataset
"""
n_sim = len(trials_list)
ds_list = []
if sim_type is None:
for i in range(n_sim):
ds_new = model.simulate(trials = trials_list[i],
par_val = par_val,
random_resp = random_resp,
ident = 'sim_' + str(i))
ds_list += [ds_new]
else:
for i in range(n_sim):
ds_new = model.simulate(trials = trials_list[i],
par_val = par_val,
random_resp = random_resp,
ident = 'sim_' + str(i),
sim_type = sim_type)
ds_list += [ds_new]
ds = xr.combine_nested(ds_list, concat_dim = ['ident'])
return ds
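# Usage sketch (illustrative only; `my_model`, `trials_a` and `trials_b` are
# hypothetical statsrat objects): every trial sequence is simulated with the
# same parameter vector and the results are concatenated along 'ident'
# ('sim_0', 'sim_1', ...), e.g.
#
#     >>> ds = multi_sim(my_model, [trials_a, trials_b], par_val=[0.5, 1.0])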
def log_lik(model, ds, par_val):
"""
Compute log-likelihood of individual time step data.
Parameters
----------
model : object
A learning model object.
ds : dataset
Experimental data, including cues, behavioral responses,
outcomes etc. from one individual and schedule.
par_val : list
Learning model parameters (floats or ints).
Returns
-------
ll : float
Log-likelihood of the data given parameter values.
"""
# For now, this assumes discrete choice data (i.e. resp_type = 'choice')
# 'b' has the same dimensions as 'b_hat' with 0 for choices not made and 1 for choices made
sim_ds = model.simulate(ds, par_val = par_val) # run simulation
b_hat = np.array(sim_ds['b_hat'])
b_hat[b_hat == 0] = 0.00000001
log_prob = np.log(b_hat) # logarithms of choice probabilities
resp = np.array(ds['b'])
ll = np.sum(log_prob*resp) # log-likelihood of choice sequence
return ll
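# Usage sketch (illustrative only; `my_model` and `obs_ds` are hypothetical):
# higher (less negative) values mean the parameters explain the observed
# choices better, so -log_lik(...) is a natural objective for model fitting,
# e.g.
#
#     >>> ll = log_lik(my_model, obs_ds, par_val=[0.5, 1.0])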
def perform_oat(model, experiment, minimize = True, oat = None, n = 5, max_time = 60, verbose = False, algorithm = nlopt.GN_ORIG_DIRECT, sim_type = None):
"""
Perform an ordinal adequacy test (OAT).
Parameters
----------
model: learning model object
experiment: experiment
minimize: boolean, optional
Should the OAT score by minimized as well as maximized?
Defaults to True.
oat: str or None, optional
Name of the OAT to use. Defaults to None, in which
case the alphabetically first OAT in the experiment.
n: int, optional
Number of individuals to simulate. Defaults to 5.
max_time: int, optional
Maximum time for each optimization (in seconds), i.e.
about half the maximum total time running the whole OAT should take.
Defaults to 60.
verbose: boolean, optional
Should the parameter values be printed as the search is going on?
Defaults to False.
algorithm: object, optional
NLopt algorithm to use for optimization.
Defaults to nlopt.GN_ORIG_DIRECT.
sim_type: str or None, optional
Type of simulation to perform (passed to the model's .simulate() method).
Should be a string indicating the type of simulation if there is more than
one type (e.g. latent cause models), and otherwise should be None.
Defaults to None.
Returns
-------
output: dataframe (Pandas)
Model parameters that produce maximum and minimum mean OAT score,
along with those maximum and minimum mean OAT scores and (if n > 1)
their associated 95% confidence intervals.
mean_resp_max: dataframe
Relevant responses at OAT maximum (and minimum if applicable), averaged
across individuals and trials.
Notes
-----
The experiment's OAT object defines a behavioral score function
designed such that positive values reflect response patterns
consistent with empirical data and negative values reflect the
opposite. This method maximizes and minimizes the score produced
by the learning model. If the maximum score is positive, the model
    CAN reproduce empirical results. If the minimum score is
also positive, the model ALWAYS reproduces those results.
"""
# determine which OAT to use
if oat is None:
oat_used = experiment.oats[list(experiment.oats.keys())[0]]
else:
oat_used = experiment.oats[oat]
# make a list of all schedules (groups) to simulate
if oat_used.schedule_neg is None:
s_list = oat_used.schedule_pos
else:
s_list = oat_used.schedule_pos + oat_used.schedule_neg
# for each schedule, create a list of trial sequences to use in simulations
trials_list = dict(keys = s_list)
for s in s_list:
new = []
for j in range(n):
new += [experiment.make_trials(schedule = s)]
trials_list[s] = new
# set up parameter space
par_names = model.pars.index.tolist()
free_names = par_names.copy()
if 'resp_scale' in free_names: # get rid of resp_scale as a free parameter (it's fixed at 5)
free_names.remove('resp_scale') # modifies list in place
n_free = len(free_names) # number of free parameters
free_pars = model.pars.loc[free_names] # free parameters
mid_pars = (free_pars['max'] + free_pars['min'])/2 # midpoint of each parameter's allowed interval
# set up objective function
if 'resp_scale' in par_names:
if verbose:
def f(x, grad = None):
if grad.size > 0:
grad = None
par_val = np.append(x, 5)
print(par_val)
sim_data = {}
for s in s_list:
sim_data[s] = multi_sim(model, trials_list[s], par_val, random_resp = False, sim_type = sim_type)
oat_total = oat_used.compute_total(data = sim_data)
return oat_total
else:
def f(x, grad = None):
if grad.size > 0:
grad = None
par_val = np.append(x, 5)
sim_data = {}
for s in s_list:
sim_data[s] = multi_sim(model, trials_list[s], par_val, random_resp = False, sim_type = sim_type)
oat_total = oat_used.compute_total(data = sim_data)
return oat_total
else:
if verbose:
def f(x, grad = None):
if grad.size > 0:
grad = None
par_val = x
print(par_val)
sim_data = {}
for s in s_list:
sim_data[s] = multi_sim(model, trials_list[s], par_val, random_resp = False, sim_type = sim_type)
oat_total = oat_used.compute_total(data = sim_data)
return oat_total
else:
def f(x, grad = None):
if grad.size > 0:
grad = None
par_val = x
sim_data = {}
for s in s_list:
sim_data[s] = multi_sim(model, trials_list[s], par_val, random_resp = False, sim_type = sim_type)
oat_total = oat_used.compute_total(data = sim_data)
return oat_total
# maximize the OAT score
print('Maximizing OAT score.')
# global optimization (to find approximate optimum)
gopt_max = nlopt.opt(algorithm, n_free)
gopt_max.set_max_objective(f)
gopt_max.set_lower_bounds(np.array(free_pars['min'] + 0.001))
gopt_max.set_upper_bounds(np.array(free_pars['max'] - 0.001))
gopt_max.set_maxtime(max_time/2)
par_max_aprx = gopt_max.optimize(mid_pars)
# local optimization (to refine answer)
lopt_max = nlopt.opt(nlopt.LN_SBPLX, n_free)
lopt_max.set_max_objective(f)
lopt_max.set_lower_bounds(np.array(free_pars['min'] + 0.001))
lopt_max.set_upper_bounds(np.array(free_pars['max'] - 0.001))
lopt_max.set_maxtime(max_time/2)
par_max = lopt_max.optimize(par_max_aprx)
if minimize:
# minimize the OAT score
print('Minimizing OAT score.')
# global optimization
gopt_min = nlopt.opt(algorithm, n_free)
gopt_min.set_min_objective(f)
gopt_min.set_lower_bounds(np.array(free_pars['min'] + 0.001))
gopt_min.set_upper_bounds(np.array(free_pars['max'] - 0.001))
gopt_min.set_maxtime(max_time/2)
par_min_aprx = gopt_min.optimize(mid_pars)
# local optimization (to refine answer)
lopt_min = nlopt.opt(nlopt.LN_SBPLX, n_free)
lopt_min.set_min_objective(f)
lopt_min.set_lower_bounds(np.array(free_pars['min'] + 0.001))
lopt_min.set_upper_bounds(np.array(free_pars['max'] - 0.001))
lopt_min.set_maxtime(max_time/2)
par_min = lopt_min.optimize(par_min_aprx)
# simulate data to compute resulting OAT scores at max and min
par_names = model.pars.index.tolist()
min_data = dict(keys = s_list)
max_data = dict(keys = s_list)
if 'resp_scale' in par_names:
for s in s_list:
max_data[s] = multi_sim(model, trials_list[s], np.append(par_max, 5), random_resp = False, sim_type = sim_type)
if minimize:
min_data[s] = multi_sim(model, trials_list[s], np.append(par_min, 5), random_resp = False, sim_type = sim_type)
else:
for s in s_list:
max_data[s] = multi_sim(model, trials_list[s], par_max, random_resp = False, sim_type = sim_type)
if minimize:
min_data[s] = multi_sim(model, trials_list[s], par_min, random_resp = False, sim_type = sim_type)
# package results for output
output_dict = dict()
if n > 1:
if minimize:
min_conf = oat_used.conf_interval(data = min_data, conf_level = 0.95)
max_conf = oat_used.conf_interval(data = max_data, conf_level = 0.95)
for i in range(n_free):
output_dict[free_names[i]] = [par_min[i], par_max[i]]
output_dict['mean'] = [min_conf['mean'], max_conf['mean']]
output_dict['lower'] = [min_conf['lower'], max_conf['lower']]
output_dict['upper'] = [min_conf['upper'], max_conf['upper']]
index = ['min', 'max']
else:
max_conf = oat_used.conf_interval(data = max_data, conf_level = 0.95)
for i in range(n_free):
output_dict[free_names[i]] = [par_max[i]]
output_dict['mean'] = [max_conf['mean']]
output_dict['lower'] = [max_conf['lower']]
output_dict['upper'] = [max_conf['upper']]
index = ['max']
else:
if minimize:
min_value = oat_used.compute_total(data = min_data)
max_value = oat_used.compute_total(data = max_data)
for i in range(n_free):
output_dict[free_names[i]] = [par_min[i], par_max[i]]
output_dict['value'] = [min_value, max_value]
index = ['min', 'max']
else:
            max_value = oat_used.compute_total(data =
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
##########################################################################
# list_dbsystem_with_maintenance_in_tenancy.py
#
# @author: <NAME>
#
# Supports Python 3
#
# DISCLAIMER - This is not an official Oracle application. It is not supported by Oracle Support and should NOT be used for utilization calculation purposes
##########################################################################
# Info:
# List all dbsystems and exadatas including maintenance in Tenancy
#
# Connectivity:
# Option 1 - User Authentication
# $HOME/.oci/config, please follow - https://docs.cloud.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
# OCI user part of ListDBSystemGroup group with below Policy rules:
# Allow group ListDBSystemGroup to inspect compartments in tenancy
# Allow group ListDBSystemGroup to inspect tenancies in tenancy
# Allow group ListDBSystemGroup to inspect db-systems in tenancy
# Allow group ListDBSystemGroup to inspect infrastructures in tenancy
#
# Option 2 - Instance Principle
# Compute instance part of DynListDBSystemGroup dynamic group with policy rules:
# Allow dynamic group DynListDBSystemGroup to inspect compartments in tenancy
# Allow dynamic group DynListDBSystemGroup to inspect tenancies in tenancy
# Allow dynamic group DynListDBSystemGroup to inspect db-systems in tenancy
# Allow dynamic group DynListDBSystemGroup to inspect exadata-infrastructures in tenancy
#
##########################################################################
# Modules Included:
# - oci.identity.IdentityClient
#
# APIs Used:
# - IdentityClient.list_compartments - Policy COMPARTMENT_INSPECT
# - IdentityClient.get_tenancy - Policy TENANCY_INSPECT
# - IdentityClient.list_region_subscriptions - Policy TENANCY_INSPECT
# - DatabaseClient.list_db_systems - Policy DB_SYSTEM_INSPECT
# - DatabaseClient.get_maintenance_run - Policy DB_SYSTEM_INSPECT
# - DatabaseClient.list_cloud_exadata_infrastructures - Policy EXADATA_INFRASTRUCTURES_INSPECT
# - DatabaseClient.list_cloud_vm_clusters - Policy EXADATA_INFRASTRUCTURES_INSPECT
##########################################################################
# Application Command line parameters
#
# -t config - Config file section to use (tenancy profile)
# -p proxy - Set Proxy (i.e. www-proxy-server.com:80)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
##########################################################################
from __future__ import print_function
import sys
import argparse
import datetime
import oci
import json
import os
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
chars = int(90)
print("")
print('#' * chars)
print("#" + name.center(chars - 2, " ") + "#")
print('#' * chars)
##########################################################################
# check service error to warn instead of error
##########################################################################
def check_service_error(code):
return ('max retries exceeded' in str(code).lower() or
'auth' in str(code).lower() or
'notfound' in str(code).lower() or
code == 'Forbidden' or
code == 'TooManyRequests' or
code == 'IncorrectState' or
code == 'LimitExceeded'
)
##########################################################################
# Create signer for Authentication
# Input - config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_profile, is_instance_principals, is_delegation_token):
# if instance principals authentications
if is_instance_principals:
try:
signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
config = {'region': signer.region, 'tenancy': signer.tenancy_id}
return config, signer
except Exception:
print_header("Error obtaining instance principals certificate, aborting")
raise SystemExit
# -----------------------------
# Delegation Token
# -----------------------------
elif is_delegation_token:
try:
# check if env variables OCI_CONFIG_FILE, OCI_CONFIG_PROFILE exist and use them
env_config_file = os.environ.get('OCI_CONFIG_FILE')
env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
# check if file exist
if env_config_file is None or env_config_section is None:
print("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
print("")
raise SystemExit
# check if file exist
if not os.path.isfile(env_config_file):
print("*** Config File " + env_config_file + " does not exist, Abort. ***")
print("")
raise SystemExit
config = oci.config.from_file(env_config_file, env_config_section)
delegation_token_location = config["delegation_token_file"]
with open(delegation_token_location, 'r') as delegation_token_file:
delegation_token = delegation_token_file.read().strip()
# get signer from delegation token
signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
return config, signer
except KeyError:
print("* Key Error obtaining delegation_token_file")
raise SystemExit
except Exception:
raise
# -----------------------------
# config file authentication
# -----------------------------
else:
config = oci.config.from_file(
oci.config.DEFAULT_LOCATION,
(config_profile if config_profile else oci.config.DEFAULT_PROFILE)
)
signer = oci.signer.Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
return config, signer
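# Usage sketch (illustrative only): the (config, signer) pair returned here is
# what the OCI clients below expect, e.g. for instance-principal authentication:
#
#     config, signer = create_signer("", True, False)
#     identity = oci.identity.IdentityClient(config, signer=signer)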
##########################################################################
# Load compartments
##########################################################################
def identity_read_compartments(identity, tenancy):
print("Loading Compartments...")
try:
compartments = oci.pagination.list_call_get_all_results(
identity.list_compartments,
tenancy.id,
compartment_id_in_subtree=True
).data
# Add root compartment which is not part of the list_compartments
compartments.append(tenancy)
print(" Total " + str(len(compartments)) + " compartments loaded.")
return compartments
except Exception as e:
raise RuntimeError("Error in identity_read_compartments: " + str(e.args))
##########################################################################
# Main
##########################################################################
# Get Command Line Parser
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-ip', action='store_true', default=False, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
cmd = parser.parse_args()
# Start print time info
start_time = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
print_header("Running DB Systems Extract")
print("Written By <NAME>, June 2020, Updated March 2021")
print("Starts at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
print("Command Line : " + ' '.join(x for x in sys.argv[1:]))
# Identity extract compartments
config, signer = create_signer(cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
compartments = []
data = []
warnings = 0
tenancy = None
try:
print("\nConnecting to Identity Service...")
identity = oci.identity.IdentityClient(config, signer=signer)
if cmd.proxy:
identity.base_client.session.proxies = {'https': cmd.proxy}
tenancy = identity.get_tenancy(config["tenancy"]).data
regions = identity.list_region_subscriptions(tenancy.id).data
print("Tenant Name : " + str(tenancy.name))
print("Tenant Id : " + tenancy.id)
print("")
compartments = identity_read_compartments(identity, tenancy)
except Exception as e:
raise RuntimeError("\nError extracting compartments section - " + str(e))
##########################################################################
# load_database_maintatance
##########################################################################
def load_database_maintatance(database_client, maintenance_run_id, db_system_name):
try:
if not maintenance_run_id:
return {}
# oci.database.models.MaintenanceRun
mt = database_client.get_maintenance_run(maintenance_run_id).data
val = {'id': str(mt.id),
'display_name': str(mt.display_name),
'description': str(mt.description),
'lifecycle_state': str(mt.lifecycle_state),
'time_scheduled': str(mt.time_scheduled),
'time_started': str(mt.time_started),
'time_ended': str(mt.time_ended),
'target_resource_type': str(mt.target_resource_type),
'target_resource_id': str(mt.target_resource_id),
'maintenance_type': str(mt.maintenance_type),
'maintenance_subtype': str(mt.maintenance_subtype),
'maintenance_display': str(mt.display_name) + " ( " + str(mt.maintenance_type) + ", " + str(mt.maintenance_subtype) + ", " + str(mt.lifecycle_state) + " ), Scheduled: " + str(mt.time_scheduled)[0:16] + ((", Execution: " + str(mt.time_started)[0:16] + " - " + str(mt.time_ended)[0:16]) if str(mt.time_started) != 'None' else ""),
'maintenance_alert': ""
}
# If maintenance is less than 14 days
if mt.time_scheduled:
delta = mt.time_scheduled.date() - datetime.date.today()
if delta.days <= 14 and delta.days >= 0 and not mt.time_started:
val['maintenance_alert'] = "DBSystem Maintenance is in " + str(delta.days).ljust(2, ' ') + " days, on " + str(mt.time_scheduled)[0:16] + " for " + db_system_name
return val
except oci.exceptions.ServiceError:
print("m", end="")
return ""
except oci.exceptions.RequestException:
print("m", end="")
return ""
except Exception as e:
raise RuntimeError("\nError extracting database maintenance section - " + str(e))
##########################################################################
# load_database_maintatance_windows
##########################################################################
def load_database_maintatance_windows(maintenance_window):
try:
if not maintenance_window:
return {}
mw = maintenance_window
value = {
'preference': str(mw.preference),
'months': ", ".join([x.name for x in mw.months]) if mw.months else "",
'weeks_of_month': ", ".join([str(x) for x in mw.weeks_of_month]) if mw.weeks_of_month else "",
'hours_of_day': ", ".join([str(x) for x in mw.hours_of_day]) if mw.hours_of_day else "",
'days_of_week': ", ".join([str(x.name) for x in mw.days_of_week]) if mw.days_of_week else "",
'lead_time_in_weeks': str(mw.lead_time_in_weeks) if mw.lead_time_in_weeks else "",
}
value['display'] = str(mw.preference) if str(mw.preference) == "NO_PREFERENCE" else (str(mw.preference) + ": Months: " + value['months'] + ", Weeks: " + value['weeks_of_month'] + ", DOW: " + value['days_of_week'] + ", Hours: " + value['hours_of_day'] + ", Lead Weeks: " + value['lead_time_in_weeks'])
return value
except Exception as e:
raise RuntimeError("\nError handling Maintenance Window - " + str(e))
##########################################################################
# load_database_dbsystem
##########################################################################
def load_database_dbsystem(database_client, region_name, compartment):
try:
global warnings
global data
print(" Compartment " + (str(compartment.name) + "... ").ljust(35), end="")
cnt = 0
list_db_systems = []
try:
list_db_systems = oci.pagination.list_call_get_all_results(
database_client.list_db_systems,
compartment.id,
sort_by="DISPLAYNAME"
).data
except oci.exceptions.ServiceError as e:
if check_service_error(e.code):
warnings += 1
print("Warnings ")
return
raise
# loop on the db systems
# dbs = oci.database.models.DbSystemSummary
for dbs in list_db_systems:
if dbs.lifecycle_state == oci.database.models.DbSystemSummary.LIFECYCLE_STATE_TERMINATED or \
dbs.lifecycle_state == "MIGRATED":
continue
value = {
'region_name': region_name,
'compartment_name': str(compartment.name),
'compartment_id': str(compartment.id),
'id': str(dbs.id),
'display_name': str(dbs.display_name),
'shape': str(dbs.shape),
'lifecycle_state': str(dbs.lifecycle_state),
'data_storage_size_in_gbs': "" if dbs.data_storage_size_in_gbs is None else str(dbs.data_storage_size_in_gbs),
'availability_domain': str(dbs.availability_domain),
'cpu_core_count': str(dbs.cpu_core_count),
'node_count': ("" if dbs.node_count is None else str(dbs.node_count)),
'version': str(dbs.version),
'hostname': str(dbs.hostname),
'domain': str(dbs.domain),
'data_storage_percentage': str(dbs.data_storage_percentage),
'data_subnet_id': str(dbs.subnet_id),
'backup_subnet_id': str(dbs.backup_subnet_id),
'scan_dns_record_id': "" if dbs.scan_dns_record_id is None else str(dbs.scan_dns_record_id),
'listener_port': str(dbs.listener_port),
'cluster_name': "" if dbs.cluster_name is None else str(dbs.cluster_name),
'database_edition': str(dbs.database_edition),
'time_created': str(dbs.time_created),
'storage_management': "",
'sparse_diskgroup': str(dbs.sparse_diskgroup),
'reco_storage_size_in_gb': str(dbs.reco_storage_size_in_gb),
'last_maintenance_run': load_database_maintatance(database_client, dbs.last_maintenance_run_id, str(dbs.display_name) + " - " + str(dbs.shape)),
'next_maintenance_run': load_database_maintatance(database_client, dbs.next_maintenance_run_id, str(dbs.display_name) + " - " + str(dbs.shape)),
'maintenance_window': load_database_maintatance_windows(dbs.maintenance_window),
'defined_tags': [] if dbs.defined_tags is None else dbs.defined_tags,
'freeform_tags': [] if dbs.freeform_tags is None else dbs.freeform_tags
}
# storage_management
if dbs.db_system_options:
if dbs.db_system_options.storage_management:
value['storage_management'] = dbs.db_system_options.storage_management
# license model
if dbs.license_model == oci.database.models.DbSystem.LICENSE_MODEL_LICENSE_INCLUDED:
value['license_model'] = "INCL"
elif dbs.license_model == oci.database.models.DbSystem.LICENSE_MODEL_BRING_YOUR_OWN_LICENSE:
value['license_model'] = "BYOL"
else:
value['license_model'] = str(dbs.license_model)
# Edition
if dbs.database_edition == oci.database.models.DbSystem.DATABASE_EDITION_ENTERPRISE_EDITION:
value['database_edition_short'] = "EE"
elif dbs.database_edition == oci.database.models.DbSystem.DATABASE_EDITION_ENTERPRISE_EDITION_EXTREME_PERFORMANCE:
value['database_edition_short'] = "XP"
elif dbs.database_edition == oci.database.models.DbSystem.DATABASE_EDITION_ENTERPRISE_EDITION_HIGH_PERFORMANCE:
value['database_edition_short'] = "HP"
elif dbs.database_edition == oci.database.models.DbSystem.DATABASE_EDITION_STANDARD_EDITION:
value['database_edition_short'] = "SE"
else:
value['database_edition_short'] = dbs.database_edition
# add the data
cnt += 1
data.append(value)
# print dbsystems for the compartment
if cnt == 0:
print("(-)")
else:
<reponame>domielias/PyQuery
from .settings import (
WHERE_SPECIAL_ARGUMENTS, AUTOMATIC_JOINS_PLACEHOLDER,
FIELD_FORMAT, SELECT_FORMAT, JOIN_CLAUSE_FORMAT, WHERE_CLAUSE_FORMAT, WHERE_AND_CONNECTOR_FORMAT,
WHERE_EQUAL_OPERATION_FORMAT, ORDER_BY_CLAUSE_FORMAT, ORDER_BY_ASC_FORMAT, ORDER_BY_DESC_FORMAT,
LIMIT_FORMAT,VALUE_STRING_FORMAT, VALUE_LIST_FORMAT, VALUE_TUPLE_FORMAT, VALUE_NULL_FORMAT, VALUE_DATETIME_FORMAT,
VALUE_SINGLE_QUOTE_FORMAT, DISTINCT_CLAUSE_FORMAT, FIELD_OR_TABLES_FORMAT
)
from datetime import datetime
import math
import random
class BaseQuery:
def __init__(self, on_table, engine):
self.engine = engine
self.on_table = on_table
# first value of the tuple is the original table name, the second, the value we are using
self.fields_table_relations={
'':dict(table_name=on_table, is_alias=False)
}
def _format_db_tables_names(self, value):
"""
        Formats a single entry of the fields_table_relations dict.
        Args:
            value (dict): a single dict of the fields_table_relations dict
"""
if value['is_alias']:
return value['table_name']
else:
return self._format_field_or_tables(value['table_name'])
def _format_field_or_tables(self, value):
return FIELD_OR_TABLES_FORMAT.format(value)
def __create_table_name_alias(self, table_name):
"""
Creates a new alias for the table
"""
alias = 'TA{}'.format(str(random.randint(1,100)))
invalid_aliases = [value['table_name'] for value in self.fields_table_relations.values()]
while alias in invalid_aliases:
alias = 'TA{}'.format(str(random.randint(1,100)))
return alias
def _get_table_name_or_alias(self, query_path, table_name):
if query_path in self.fields_table_relations:
return self.fields_table_relations[query_path]
if table_name in [value['table_name'] for value in self.fields_table_relations.values()]:
self.fields_table_relations[query_path] = {
'table_name': self.__create_table_name_alias(table_name),
'is_alias': True
}
else:
self.fields_table_relations[query_path] = {
'table_name': table_name,
'is_alias': False
}
return self.fields_table_relations[query_path]
def __format_joins(self, joins):
"""
Handle all of the joins it must do for the query to succeed
The user can set dynamic join_relations to get the right table. Let's go with the following example
>>> {
"form_value": {
"form": "dynamic_forms"
}
}
        The above example means: "When you are making a join with the `form` field and the table is `form_value`, we use the value `dynamic_forms` instead".
WHAT?
Let's dig deeper:
When you do something like this:
>>> connection.query('form_value').filter(form__form__id=2).run()
        Let's separate the string by each double underscore; we get something like this: [form, form, id]
        The first `form` is the name of the field in `form_value`, but this field does not reference the `form`
        table; instead it references `dynamic_forms`, and we resolve the correct table on each join.
        So we get something like this:
        INNER JOIN "dynamic_forms" ON "dynamic_forms"."id" = "form_value"."form_id"
        INNER JOIN "form" ON "form"."id" = "dynamic_forms"."form_id"
        Note that the second join correctly references the "form" table, so we don't need to set any join relation
        for this field. Okay, but what if the `form` field of `dynamic_forms` references the `foo` table?
We would do something like the following:
>>> {
"form_value": {
"form": "dynamic_forms"
            },
"dynamic_forms": {
"form": "foo"
}
}
"""
to_table_join = self.fields_table_relations['']
reference_string_list = list()
for index, join in enumerate(joins):
# creates a reference of the path to the fields so something like
# depends_on__group__company and so on, with this path we can reuse the created aliases
reference_string_list.append(join)
reference_string = '__'.join(reference_string_list)
from_table_join = to_table_join
# automatically creates alias
to_table_join_name = self.join_relations.get(from_table_join['table_name'], {}).get(join, join)
to_table_join = self._get_table_name_or_alias(reference_string, to_table_join_name)
join_clause = JOIN_CLAUSE_FORMAT.format(
join=join,
from_table_join=self._format_db_tables_names(from_table_join),
to_table_join=FIELD_OR_TABLES_FORMAT.format(to_table_join_name),
to_table_join_name_or_alias=self._format_db_tables_names(to_table_join),
alias=to_table_join['table_name'] if to_table_join['is_alias'] else ''
)
if join_clause not in self.query_joins:
self.query_joins.append(join_clause)
return self._format_db_tables_names(to_table_join)
def _format_db_fields(self, value):
"""
Formats each database field based on a default VALUE_CLAUSE
"""
table_name = self._format_db_tables_names(self.fields_table_relations[''])
splitted_value = value.split(AUTOMATIC_JOINS_PLACEHOLDER)
if len(splitted_value) > 1:
# Handle automatic join operations
joins = splitted_value[:-1]
table_name = self.__format_joins(joins)
values_to_use = splitted_value[-2:]
value = FIELD_FORMAT.format(
table=table_name,
field=self._format_field_or_tables(values_to_use[-1])
)
return value
def format_db_values(self, value):
if type(value) == str:
value = VALUE_STRING_FORMAT.format(value.replace("'",VALUE_SINGLE_QUOTE_FORMAT))
if type(value) == list:
value = VALUE_LIST_FORMAT.format(', '.join([str(self.format_db_values(val)) for val in value]))
if type(value) == datetime:
value= VALUE_STRING_FORMAT.format(value.strftime(VALUE_DATETIME_FORMAT))
if type(value) == tuple:
value = '{}'.format(', '.join([str(self.format_db_values(val)) for val in value]))
if type(value) == self.__class__:
value = self.format_db_values(list(value))
if value == None:
value = VALUE_NULL_FORMAT
return value
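# Usage sketch (illustrative only; the exact output text depends on the VALUE_*
# formats imported from .settings, and `connection.query()` is assumed to return
# one of the query classes below): format_db_values renders Python values
# recursively, so a list such as [1, "it's", None] becomes a comma-separated SQL
# value list with the string quoted/escaped and None rendered as the configured
# NULL literal, e.g.
#
#     >>> connection.query('form_value').format_db_values([1, "it's", None])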
class Insert(BaseQuery):
def bulk_insert(self, values, column_names=None):
"""
        This is optimized to be quicker than insert; all arguments EXCEPT column_names must be lists of values
To be easier you can use it like this:
>>> connection.query('form_value').bulk_insert(values=[[1,2], [3,4], [4,5]], column_names=['column_a', 'column_b'])
Use with the * for positional arguments
Args:
column_names (list): the column names as a list
Returns:
bool: returns True if everything went fine
"""
values = tuple(list(value) for value in values)
columns = column_names if column_names else self.columns
maximum_number_of_values_per_iteration = 999
iterations = math.ceil(len(values)/maximum_number_of_values_per_iteration)
self.engine.connect()
for iteration in range(0, iterations):
iteration_values = values[iteration*maximum_number_of_values_per_iteration : (iteration+1)*maximum_number_of_values_per_iteration]
query = self._format_insert(tuple(iteration_values), columns)
self.engine.execute(query)
self.engine.commit()
return True
def insert(self, **kwargs):
"""
        Inserts a small amount of data into the database
        Returns:
            bool: returns True if everything went fine
"""
columns = kwargs.keys()
values = list(kwargs.values())
query = self._format_insert(values, columns)
print(query)
#self.engine.save(query)
return True
def _format_insert(self, values, columns):
INSERT_CLAUSE = 'INSERT INTO "{}" ({}) VALUES {}'
return INSERT_CLAUSE.format(
self.on_table,
', '.join(['"{}"'.format(column) for column in columns]),
self.format_db_values(values)
)
class Select(BaseQuery):
"""
Class responsible for handling select statements.
"""
def __init__(self, join_relations, *args, **kwargs):
self.join_relations = join_relations
self.query_select = ['*']
self.query_distinct = ''
self.query_orders = []
self.query_where = []
self.query_limit = ''
self.query_joins = []
super(Select, self).__init__(*args, **kwargs)
@property
def __get_query(self):
query = SELECT_FORMAT.format(
select=', '.join(self.query_select),
distinct=DISTINCT_CLAUSE_FORMAT,
froms=self.on_table
)
joins = '{} '.format(' '.join(self.query_joins)) if self.query_joins else ''
where = WHERE_CLAUSE_FORMAT.format(where_conditions=WHERE_AND_CONNECTOR_FORMAT.join(self.query_where)) if self.query_where else ''
orders = ORDER_BY_CLAUSE_FORMAT.format(order_by_conditions=', '.join(self.query_orders)) if self.query_orders else ''
limit = self.query_limit
query = query + joins + where + orders + limit
return query
@property
def query(self):
return self.__get_query
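# Illustrative chained usage (hypothetical table and columns; the rendered SQL
# depends on the SELECT/WHERE/ORDER BY/LIMIT format constants):
#   q = connection.query('person').select('id', 'name').filter(id=2).order_by('-id').limit(10)
#   q.query  # -> something like: SELECT "person"."id", "person"."name" FROM person WHERE "person"."id" = 2 ORDER BY "person"."id" DESC LIMIT 10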
def first(self):
"""
Returns the first element of the query, sets limit as 1
"""
return self.limit(1)
def limit(self, number):
"""
Sets your desired limit to the query
Args:
number (int): the limit number
Returns:
self: this object so you can concatenate with other functions
"""
self.query_limit = LIMIT_FORMAT.format(num=number)
return self
def distinct(self):
self.query_distinct = DISTINCT_CLAUSE_FORMAT
return self
def select(self, *args, **kwargs):
"""
Expects each column name as a string. You can also select across joins using double underscores
like '__'.
You define select like the following examples:
>>> connection.query('example_db_name').select('id').run()
>>> connection.query('example_db_name').select('id', 'name').run()
Or if you need to select through joins you define it like this:
>>> connection.query('example_db_name').select('connectedfield__id').run()
In this example `connectedfield` would be a field of the `example_db_name` table, and the `id` you are selecting
is the id on the `connectedfield` table.
Args:
flat (bool, optional): You can set flat=True if you are retrieving only one option field. Defaults to False
"""
# you can retrieve flat values so instead of tuples like this [(1,), (2,)]
# you get your results as a nice flat list like [1,2]
# this only works if you pass exactly ONE argument to select
self._flat = kwargs.get('flat', False) and len(args) == 1
# you can call select() multiple times, but every time you do it resets the select clause
# so use just one
self.query_select = []
for value in args:
select_clause = self._format_db_fields(value)
if select_clause not in self.query_select:
self.query_select.append(select_clause)
return self
def filter(self, **kwargs):
"""
You need to define filters like the following example:
>>> connection.query('example_db_name').filter(id=2).run()
Or if you need to make any joins you define it like this:
>>> connection.query('example_db_name').filter(connectedfield__id=2).run()
In this example `connectedfield` would be a field of `example_db_name` table, and the `id` you are making
where condition is the id on `connectedfield` table.
"""
for key, value in kwargs.items():
where_operation = WHERE_SPECIAL_ARGUMENTS.get(key.split(AUTOMATIC_JOINS_PLACEHOLDER)[-1], WHERE_EQUAL_OPERATION_FORMAT)
if where_operation != WHERE_EQUAL_OPERATION_FORMAT:
key = AUTOMATIC_JOINS_PLACEHOLDER.join(key.split(AUTOMATIC_JOINS_PLACEHOLDER)[:-1])
where_field = self._format_db_fields(key)
value = self.format_db_values(value)
where_condition = where_field + where_operation + str(value)
if where_condition not in self.query_where:
self.query_where.append(where_condition)
return self
def order_by(self, *args):
"""
Expects each column name as a string. You can also order across joins using double underscores
like '__'. Prefix a column name with '-' to sort in descending order.
You need to define order_by like the following example:
>>> connection.query('example_db_name').order_by('id').run()
>>> connection.query('example_db_name').order_by('id', 'name').run()
Or if you need to order by joins you define it like this:
>>> connection.query('example_db_name').order_by('connectedfield__id').run()
In this example `connectedfield` would be a field of `example_db_name` table, and the `id` you are ordering
is the id on `connectedfield` table.
"""
if any([type(value) != str for value in args]):
raise TypeError('Your arguments MUST be str type')
for value in args:
asc_or_desc = ORDER_BY_ASC_FORMAT
if value[0] == '-':
asc_or_desc = ORDER_BY_DESC_FORMAT
value = value[1:]
order_clause = self._format_db_fields(value)
order_clause = '{} {}'.format(order_clause, asc_or_desc)
if order_clause not in self.query_orders:
self.query_orders.append(order_clause)
return self
def force(self):
"""
Runs a SELECT
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import shutil
import os
import glob
import numpy as np
from astropy import log
from astropy.logger import AstropyUserWarning
from astropy.tests.helper import remote_data
import pytest
from stingray.lightcurve import Lightcurve
import hendrics as hen
from hendrics.tests import _dummy_par
from hendrics.fold import HAS_PINT
from hendrics import (
fake,
fspec,
base,
calibrate,
create_gti,
exposure,
exvar,
io,
lcurve,
plot,
read_events,
rebin,
)
from hendrics.read_events import treat_event_file
from hendrics.io import HEN_FILE_EXTENSION, get_file_type
from hendrics.lcurve import lcurve_from_events
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
log.setLevel("DEBUG")
class TestLcurve:
"""Real unit tests."""
@classmethod
def setup_class(cls):
curdir = os.path.abspath(os.path.dirname(__file__))
cls.datadir = os.path.join(curdir, "data")
cls.fits_fileA = os.path.join(cls.datadir, "monol_testA.evt")
cls.new_filename = os.path.join(
cls.datadir, "monol_testA_nustar_fpma_ev" + HEN_FILE_EXTENSION
)
cls.calib_filename = os.path.join(
cls.datadir,
"monol_testA_nustar_fpma_ev_calib" + HEN_FILE_EXTENSION,
)
def test_treat_event_file_nustar(self):
from astropy.io.fits import Header
treat_event_file(self.fits_fileA, discard_calibration=True)
lcurve_from_events(self.new_filename)
newfile = os.path.join(
self.datadir, "monol_testA_nustar_fpma_lc" + HEN_FILE_EXTENSION
)
assert os.path.exists(newfile)
type, data = get_file_type(newfile)
assert type == "lc"
assert isinstance(data, Lightcurve)
Header.fromstring(data.header)
assert hasattr(data, "mjdref")
assert data.mjdref > 0
def test_treat_event_file_nustar_energy(self):
command = "{0} -r {1} --nproc 2".format(
self.new_filename, os.path.join(self.datadir, "test.rmf")
)
hen.calibrate.main(command.split())
lcurve_from_events(self.calib_filename, e_interval=[3, 50])
newfile = os.path.join(
self.datadir,
"monol_testA_nustar_fpma_E3-50_lc" + HEN_FILE_EXTENSION,
)
assert os.path.exists(newfile)
type, data = get_file_type(newfile)
assert type == "lc"
assert isinstance(data, Lightcurve)
assert hasattr(data, "mjdref")
assert data.mjdref > 0
class TestFullRun(object):
"""Test how command lines work.
Usually considered bad practice, but in this
case I need to test the full run of the codes, and files depend on each
other.
Inspired by http://stackoverflow.com/questions/5387299/python-unittest-testcase-execution-order
When command line is missing, uses some function calls
""" # NOQA
@classmethod
def setup_class(cls):
curdir = os.path.abspath(os.path.dirname(__file__))
cls.datadir = os.path.join(curdir, "data")
cls.ev_fileA = os.path.join(
cls.datadir, "monol_testA_nustar_fpma_ev" + HEN_FILE_EXTENSION
)
cls.ev_fileB = os.path.join(
cls.datadir, "monol_testB_nustar_fpmb_ev" + HEN_FILE_EXTENSION
)
cls.ev_fileAcal = os.path.join(
cls.datadir,
"monol_testA_nustar_fpma_ev_calib" + HEN_FILE_EXTENSION,
)
cls.ev_fileBcal = os.path.join(
cls.datadir,
"monol_testB_nustar_fpmb_ev_calib" + HEN_FILE_EXTENSION,
)
cls.par = _dummy_par("bubububu.par")
command = "{0} {1} --discard-calibration".format(
os.path.join(cls.datadir, "monol_testA.evt"),
os.path.join(cls.datadir, "monol_testB.evt"),
)
hen.read_events.main(command.split())
command = "{} {} -r {}".format(
os.path.join(
cls.datadir, "monol_testA_nustar_fpma_ev" + HEN_FILE_EXTENSION
),
os.path.join(
cls.datadir, "monol_testB_nustar_fpmb_ev" + HEN_FILE_EXTENSION
),
os.path.join(cls.datadir, "test.rmf"),
)
hen.calibrate.main(command.split())
def test_lcurve(self):
"""Test light curve production."""
from astropy.io.fits import Header
new_filename = os.path.join(
os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
)
command = (
"{0} -e {1} {2} --safe-interval "
"{3} {4} --nproc 2 -b 0.5 -o {5}"
).format(self.ev_fileAcal, 3, 50, 100, 300, new_filename)
hen.lcurve.main(command.split())
assert os.path.exists(new_filename)
lc = hen.io.load_lcurve(new_filename)
assert hasattr(lc, "header")
# Test that the header is correctly conserved
Header.fromstring(lc.header)
assert hasattr(lc, "gti")
gti_to_test = hen.io.load_events(self.ev_fileAcal).gti
assert np.allclose(gti_to_test, lc.gti)
def test_lcurve_B(self):
command = (
"{0} -e {1} {2} --safe-interval " "{3} {4} -b 0.5 -o {5}"
).format(
self.ev_fileBcal,
3,
50,
100,
300,
os.path.join(
self.datadir, "monol_testB_E3-50_lc" + HEN_FILE_EXTENSION
),
)
hen.lcurve.main(command.split())
assert os.path.exists(
os.path.join(
self.datadir, "monol_testB_E3-50_lc" + HEN_FILE_EXTENSION
)
)
def test_lcurve_noclobber(self):
input_file = self.ev_fileAcal
new_filename = os.path.join(
os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
)
with pytest.warns(AstropyUserWarning) as record:
command = ("{0} -o {1} --noclobber").format(
input_file, new_filename
)
hen.lcurve.main(command.split())
assert [
"File exists, and noclobber" in r.message.args[0] for r in record
]
def test_lcurve_split(self):
"""Test lc with gti-split option."""
command = "{0} {1} -g".format(self.ev_fileAcal, self.ev_fileBcal)
hen.lcurve.main(command.split())
new_filename = os.path.join(
self.datadir,
"monol_testA_nustar_fpma_gti000_lc" + HEN_FILE_EXTENSION,
)
assert os.path.exists(new_filename)
lc = hen.io.load_lcurve(new_filename)
gti_to_test = hen.io.load_events(self.ev_fileAcal).gti[0]
assert np.allclose(gti_to_test, lc.gti)
def test_fits_lcurve0(self):
"""Test light curves from FITS."""
lcurve_ftools_orig = os.path.join(self.datadir, "lcurveA.fits")
lcurve_ftools = os.path.join(
self.datadir, "lcurve_ftools_lc" + HEN_FILE_EXTENSION
)
command = "{0} --outfile {1}".format(
self.ev_fileAcal, os.path.join(self.datadir, "lcurve_lc")
)
hen.lcurve.main(command.split())
assert os.path.exists(
os.path.join(self.datadir, "lcurve_lc") + HEN_FILE_EXTENSION
)
command = "--fits-input {0} --outfile {1}".format(
lcurve_ftools_orig, lcurve_ftools
)
hen.lcurve.main(command.split())
with pytest.warns(AstropyUserWarning) as record:
command = command + " --noclobber"
hen.lcurve.main(command.split())
assert [
"File exists, and noclobber" in r.message.args[0] for r in record
]
def test_fits_lcurve1(self):
"""Test light curves from FITS."""
lcurve_ftools = os.path.join(
self.datadir, "lcurve_ftools_lc" + HEN_FILE_EXTENSION
)
lcurve_mp = os.path.join(
self.datadir, "lcurve_lc" + HEN_FILE_EXTENSION
)
lcdata_mp = hen.io.load_data(lcurve_mp)
lcdata_ftools = hen.io.load_data(lcurve_ftools)
lc_mp = lcdata_mp["counts"]
lenmp = len(lc_mp)
lc_ftools = lcdata_ftools["counts"]
lenftools = len(lc_ftools)
goodlen = min([lenftools, lenmp])
diff = lc_mp[:goodlen] - lc_ftools[:goodlen]
assert np.all(
np.abs(diff) <= 1e-3
), "Light curve data do not coincide between FITS and HEN"
def test_txt_lcurve(self):
"""Test light curves from txt."""
lcurve_mp = os.path.join(
self.datadir, "lcurve_lc" + HEN_FILE_EXTENSION
)
lcdata_mp = hen.io.load_data(lcurve_mp)
lc_mp = lcdata_mp["counts"]
time_mp = lcdata_mp["time"]
lcurve_txt_orig = os.path.join(self.datadir, "lcurve_txt_lc.txt")
hen.io.save_as_ascii([time_mp, lc_mp], lcurve_txt_orig)
lcurve_txt = os.path.join(
self.datadir, "lcurve_txt_lc" + HEN_FILE_EXTENSION
)
command = "--txt-input " + lcurve_txt_orig + " --outfile " + lcurve_txt
hen.lcurve.main(command.split())
lcdata_txt = hen.io.load_data(lcurve_txt)
lc_txt = lcdata_txt["counts"]
assert np.all(
np.abs(lc_mp - lc_txt) <= 1e-3
), "Light curve data do not coincide between txt and HEN"
with pytest.warns(AstropyUserWarning) as record:
command = command + " --noclobber"
hen.lcurve.main(command.split())
assert [
"File exists, and noclobber" in r.message.args[0] for r in record
]
def test_joinlcs(self):
"""Test produce joined light curves."""
new_filename = os.path.join(
self.datadir, "monol_test_joinlc" + HEN_FILE_EXTENSION
)
# because join_lightcurves separates by instrument
new_actual_filename = os.path.join(
self.datadir, "fpmamonol_test_joinlc" + HEN_FILE_EXTENSION
)
lcA_pattern = "monol_testA_nustar_fpma_gti[0-9][0-9][0-9]_lc*"
lcB_pattern = "monol_testB_nustar_fpmb_gti[0-9][0-9][0-9]_lc*"
hen.lcurve.join_lightcurves(
glob.glob(
os.path.join(self.datadir, lcA_pattern + HEN_FILE_EXTENSION)
)
+ glob.glob(
os.path.join(self.datadir, lcB_pattern + HEN_FILE_EXTENSION)
),
new_filename,
)
lc = hen.io.load_lcurve(new_actual_filename)
assert hasattr(lc, "gti")
gti_to_test = hen.io.load_events(self.ev_fileA).gti
assert np.allclose(gti_to_test, lc.gti)
def test_scrunchlcs(self):
"""Test produce scrunched light curves."""
a_in = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
b_in = os.path.join(
self.datadir, "monol_testB_E3-50_lc" + HEN_FILE_EXTENSION
)
out = os.path.join(
self.datadir, "monol_test_scrunchlc" + HEN_FILE_EXTENSION
)
command = "{0} {1} -o {2}".format(a_in, b_in, out)
a_lc = hen.io.load_lcurve(a_in)
b_lc = hen.io.load_lcurve(b_in)
a_lc.apply_gtis()
b_lc.apply_gtis()
hen.lcurve.scrunch_main(command.split())
out_lc = hen.io.load_lcurve(out)
out_lc.apply_gtis()
assert np.all(out_lc.counts == a_lc.counts + b_lc.counts)
gti_to_test = hen.io.load_events(self.ev_fileA).gti
assert np.allclose(gti_to_test, out_lc.gti)
def testbaselinelc(self):
"""Test produce scrunched light curves."""
a_in = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
out = os.path.join(self.datadir, "monol_test_baselc")
command = "{0} -o {1} -p 0.001 --lam 1e5".format(a_in, out)
hen.lcurve.baseline_main(command.split())
out_lc = hen.io.load_lcurve(out + "_0" + HEN_FILE_EXTENSION)
assert hasattr(out_lc, "base")
gti_to_test = hen.io.load_events(self.ev_fileA).gti
assert np.allclose(gti_to_test, out_lc.gti)
def testbaselinelc_nooutroot(self):
"""Test produce scrunched light curves."""
a_in = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
command = "{0} -p 0.001 --lam 1e5".format(a_in)
hen.lcurve.baseline_main(command.split())
out_lc = hen.io.load_lcurve(
hen.base.hen_root(a_in) + "_lc_baseline" + HEN_FILE_EXTENSION
)
assert hasattr(out_lc, "base")
gti_to_test = hen.io.load_events(self.ev_fileA).gti
assert np.allclose(gti_to_test, out_lc.gti)
def test_lcurve_error_uncalibrated(self):
"""Test light curve error from uncalibrated file."""
command = ("{0} -e {1} {2}").format(
os.path.join(
self.datadir, "monol_testA_nustar_fpma_ev" + HEN_FILE_EXTENSION
),
3,
50,
)
with pytest.raises(ValueError) as excinfo:
hen.lcurve.main(command.split())
message = str(excinfo.value)
assert str(message).strip().endswith("Did you run HENcalibrate?")
def test_lcurve_pi_filtering(self):
"""Test light curve using PI filtering."""
command = ("{0} --pi-interval {1} {2}").format(
os.path.join(
self.datadir, "monol_testA_nustar_fpma_ev" + HEN_FILE_EXTENSION
),
10,
300,
)
hen.lcurve.main(command.split())
def test_rebinlc(self):
"""Test LC rebinning."""
command = "{0} -r 4".format(
os.path.join(self.datadir, "monol_testA_E3-50_lc")
+ HEN_FILE_EXTENSION
)
hen.rebin.main(command.split())
def test_save_fvar_from_lc(self):
fname = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
hen.exvar.main(
[fname, "-c", "10", "--fraction-step", "0.6", "--norm", "fvar"]
)
out = hen.base.hen_root(fname) + "_fvar" + ".qdp"
assert os.path.exists(out)
def test_save_excvar_from_lc(self):
fname = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
hen.exvar.main([fname])
out = hen.base.hen_root(fname) + "_excvar" + ".qdp"
assert os.path.exists(out)
def test_save_excvar_norm_from_lc(self):
fname = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
hen.exvar.main([fname, "--norm", "norm_excvar"])
out = hen.base.hen_root(fname) + "_norm_excvar" + ".qdp"
assert os.path.exists(out)
def test_save_excvar_wrong_norm_from_lc(self):
fname = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
with pytest.raises(ValueError) as excinfo:
hen.exvar.main([fname, "--norm", "cicciput"])
assert "Normalization must be fvar, " in str(excinfo.value)
def test_create_gti_lc(self):
"""Test creating a GTI file."""
fname = (
os.path.join(self.datadir, "monol_testA_E3-50_lc")
+ HEN_FILE_EXTENSION
)
command = "{0} -f counts>0 -c --debug".format(fname)
hen.create_gti.main(command.split())
def test_apply_gti_lc(self):
"""Test applying a GTI file."""
fname = (
os.path.join(self.datadir, "monol_testA_E3-50_gti")
+ HEN_FILE_EXTENSION
)
lcfname = (
os.path.join(self.datadir, "monol_testA_E3-50_lc")
+ HEN_FILE_EXTENSION
)
lcoutname = (
os.path.join(self.datadir, "monol_testA_E3-50_lc_gtifilt")
+ HEN_FILE_EXTENSION
)
command = "{0} -a {1} --debug".format(lcfname, fname)
hen.create_gti.main(command.split())
hen.io.load_lcurve(lcoutname)
def test_plot_lcurve_baseline(self):
a_in = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
base_file = (
hen.base.hen_root(a_in) + "_lc_baseline" + HEN_FILE_EXTENSION
)
hen.plot.main([base_file, "--noplot", "-o", "dummy_base.qdp"])
filedata = np.genfromtxt("dummy_base.qdp")
assert filedata.shape[1] == 3
def test_pds_fits(self):
"""Test PDS production with light curves obtained from FITS files."""
lcurve_ftools = os.path.join(
self.datadir, "lcurve_ftools_lc" + HEN_FILE_EXTENSION
)
command = "{0} --save-all -f 128".format(lcurve_ftools)
hen.fspec.main(command.split())
def test_pds_txt(self):
"""Test PDS production with light curves obtained from txt files."""
lcurve_txt = os.path.join(
self.datadir, "lcurve_txt_lc" + HEN_FILE_EXTENSION
)
command = "{0} --save-all -f 128".format(lcurve_txt)
hen.fspec.main(command.split())
def test_exposure(self):
"""Test exposure calculations from unfiltered files."""
lcname = os.path.join(
self.datadir, "monol_testA_E3-50_lc" + HEN_FILE_EXTENSION
)
ufname = os.path.join(self.datadir, "monol_testA_uf.evt")
| |
# repo: nirvik/iWant, file: iwant/core/protocols.py
from twisted.internet import reactor, defer
from twisted.internet.protocol import Protocol, ClientFactory, DatagramProtocol
import os
import progressbar
import math
import hashlib
import time
from struct import unpack, calcsize
from engine.fileindexer.piece import piece_size
from messagebaker import bake, unbake
from constants import LEADER, PEER_DEAD, FILE_TO_BE_DOWNLOADED,\
REQ_CHUNK, FILE_CONFIRMATION_MESSAGE, INIT_FILE_REQ,\
INTERESTED, UNCHOKE, GET_HASH_IDENTITY, HASH_IDENTITY_RESPONSE,\
FILE_RESP_FMT
from iwant.core.engine.fileindexer import fileHashUtils
from iwant.core.config import SERVER_DAEMON_PORT
from iwant.core.constants import CHUNK_SIZE
class BaseProtocol(Protocol):
def __init__(self):
self.special_handler = None
def connectionMade(self):
pass
def sendLine(self, line):
self.transport.write(str(line))
def sendRaw(self, buffered):
self.transport.write(buffered)
def escape_dollar_sign(self, data):
return data.replace(self.delimiter, '')
def hookHandler(self, fn):
self.special_handler = fn
def unhookHandler(self):
self.special_handler = None
def dataReceived(self, data):
if self.special_handler:
self.special_handler(data)
else:
for char in data:
self.buff += char
if char == self.delimiter:
request_str = self.escape_dollar_sign(self.buff)
self.buff = ''
self.serviceMessage(request_str)
def serviceMessage(self, message):
pass
class FilemonitorClientProtocol(Protocol):
'''
This protocol updates the server about:
1. Whether all the files in the shared folder are indexed or not
2. The newly updated indexed files (the entire dump)
'''
def __init__(self, factory):
self.factory = factory
def connectionMade(self):
# print '@filemonitor protocol'
# print 'event {0}'.format(self.factory.event)
updated_msg = bake(
self.factory.event,
shared_folder=self.factory.updates['shared_folder'],
ADD=self.factory.updates['ADD'],
DEL=self.factory.updates['DEL'])
self.transport.write(updated_msg)
self.transport.loseConnection()
class FilemonitorClientFactory(ClientFactory):
def __init__(self, event, updates):
'''
:param config_path : string
config_path contains the .iwant directory path
'''
self.event = event
self.updates = updates
def buildProtocol(self, addr):
return FilemonitorClientProtocol(self)
class PeerdiscoveryProtocol(DatagramProtocol):
'''
Used by the election daemon
'''
def escape_hash_sign(self, string):
return string.replace(self.delimiter, '')
def _process_msg(self, req, addr):
pass
def send(self, msgObj, addr):
self.transport.write(str(msgObj), tuple(addr))
def datagramReceived(self, datagram, addr):
for dat in datagram:
self.buff += dat
if dat == self.delimiter:
req_str = self.escape_hash_sign(self.buff)
self.buff = ''
self._process_msg(req_str, addr)
self.buff = ''
class ServerElectionProtocol(Protocol):
'''
This protocol is used by the election daemon to communicate with the server about:
1. New leader
2. Node is dead (only the leader node passes information about the dead node to its local server; the rest do not)
'''
def __init__(self, factory):
self.factory = factory
def connectionMade(self):
if self.factory.dead_peer is None:
update_msg = bake(
key=LEADER,
leader=(
self.factory.leader_host,
self.factory.leader_port))
else:
update_msg = bake(PEER_DEAD, dead_uuid=self.factory.dead_peer)
self.transport.write(str(update_msg))
self.transport.loseConnection()
class ServerElectionFactory(ClientFactory):
def __init__(self, leader_host, leader_port, dead_peer=None):
'''
:param leader_host : string
:param leader_port : int
:param dead_peer : uuid of the dead peer, or None
'''
self.leader_host = leader_host
self.leader_port = leader_port
self.dead_peer = dead_peer
def buildProtocol(self, addr):
return ServerElectionProtocol(self)
class FileDownloadProtocol(BaseProtocol):
"""
This handles the entire file download.
It initiates a file transfer request by connecting to the seeder and sending the file hash.
It then receives the piece hashes as a response; we hash them and compare the result against the file root hash received from the leader (the file root hash is the hash of the concatenated piece hashes).
If that check succeeds, it tells the seeder to start sending the file.
The seeder sends an `unchoke` message, and then we start accepting the chunks.
Each time we receive a piece, we hash it and compare it with the corresponding slice of the piece hashes received earlier. If that matches, we can safely write it to the file.
Also, if the file already exists in our download folder, we ask the seeder to send only the rest of the file by providing the remaining piece range as a parameter.
"""
def __init__(self, factory):
self.factory = factory
self.piece_hashes = ''
self.delimiter = '\r'
self.special_handler = None
self._unprocessed = b''
self.buff = ''
self.piece_buffer = b''
self._receive_format = FILE_RESP_FMT
self.event_handlers = {
FILE_CONFIRMATION_MESSAGE: self.verify_pieces,
UNCHOKE: self._start_transfer
}
def connectionMade(self):
initiate_file_transfer_req_msg = bake(
INTERESTED,
filehash=self.factory.file_checksum)
self.sendLine(initiate_file_transfer_req_msg)
def serviceMessage(self, data):
key, value = unbake(data)
self.event_handlers[key](value)
@defer.inlineCallbacks
def verify_pieces(self, data):
self.piece_hashes = data['piecehashes']
hasher = hashlib.md5()
hasher.update(self.piece_hashes)
if hasher.hexdigest() == self.factory.file_root_hash:
new_file_in_resume_table = yield fileHashUtils.check_hash_present_in_resume(self.factory.file_handler.name, self.factory.dbpool)
if not new_file_in_resume_table:
# add the file properties to the resume table
file_entry = (
self.factory.file_handler.name,
0,
self.factory.file_size,
self.factory.file_checksum,
self.piece_hashes,
self.factory.file_root_hash,
False)
yield fileHashUtils.add_new_file_entry_resume(file_entry, self.factory.dbpool)
load_file_msg = bake(
INIT_FILE_REQ,
filehash=self.factory.file_checksum)
self.sendLine(load_file_msg)
def _start_transfer(self, data):
if data['unchoke']:
self.hookHandler(self.rawDataReceived)
self.request_for_pieces(bootstrap=True)
def rawDataReceived(self, data):
all_data = self._unprocessed + data
prefixLength = calcsize(FILE_RESP_FMT)
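# Each frame on the wire is a fixed-size header packed with FILE_RESP_FMT,
# carrying (piece_number, block_number, payload_length), followed by
# payload_length bytes of file data; any trailing partial frame is kept in
# self._unprocessed until more data arrives.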
currentOffset = 0
self._unprocessed = all_data
while len(all_data) >= currentOffset + prefixLength:
messageStart = currentOffset + prefixLength
piece_number, block_number, length = unpack(
FILE_RESP_FMT, all_data[
currentOffset: messageStart])
messageEnd = messageStart + length
if messageEnd > len(all_data):
break
file_data = all_data[messageStart: messageEnd]
self.process_piece(file_data, piece_number, block_number)
currentOffset = messageEnd
self._unprocessed = all_data[currentOffset:]
def process_piece(self, piece_data, piece_number, block_number):
if piece_number != self.factory.last_piece - 1:
self.piece_buffer += piece_data
if len(self.piece_buffer) == self.factory.piece_size:
final_piece_data = self.piece_buffer
self.write_piece_to_file(final_piece_data, piece_number)
self.piece_buffer = ''
else:
self.piece_buffer += piece_data
if len(self.piece_buffer) == int(self.factory.last_piece_size):
final_piece_data = self.piece_buffer
self.write_piece_to_file(final_piece_data, piece_number)
self.piece_buffer = ''
@defer.inlineCallbacks
def write_piece_to_file(self, piece_data, piece_number):
self.factory.file_handler.seek(piece_number * self.factory.piece_size)
hasher = hashlib.md5()
hasher.update(piece_data)
if hasher.hexdigest() == self.piece_hashes[
piece_number *
32: (
piece_number *
32) +
32]:
self.factory.download_status += len(piece_data)
self.factory.file_handler.write(piece_data)
self.factory.bar.update(self.factory.download_status)
if self.factory.download_status >= int(self.factory.file_size *
1000.0 * 1000.0):
self.factory.file_handler.close()
self.transport.loseConnection()
yield fileHashUtils.remove_resume_entry(self.factory.file_handler.name, self.factory.dbpool)
print '[DOWNLOAD FINISHED]: {0}'.format(self.factory.file_handler.name)
# def write_to_file(self, file_data, piece_num, block_num):
# self.factory.download_status += len(file_data)
# self.factory.file_handler.seek(piece_num * self.factory.piece_size + block_num * CHUNK_SIZE)
# self.factory.file_handler.write(file_data)
# if self.factory.download_status >= self.factory.file_size * \
# 1000.0 * 1000.0:
# print 'closing connection'
# self.factory.file_handler.close()
# self.transport.loseConnection()
def request_for_pieces(self, bootstrap=None):
piece_range_data = [
self.factory.start_piece,
self.factory.blocks_per_piece,
self.factory.last_piece,
self.factory.blocks_per_last_piece]
request_chunk_msg = bake(
REQ_CHUNK,
piece_data=piece_range_data) # have to request for a chunk range
self.sendLine(request_chunk_msg)
class FileDownloadFactory(ClientFactory):
protocol = FileDownloadProtocol
def __init__(self, **kwargs):
self.peers_list = kwargs['peers_list']
self.file_handler = kwargs['file_handler']
self.file_size = kwargs['file_size']
self.file_checksum = kwargs['file_checksum']
self.file_root_hash = kwargs['file_root_hash']
self.resume_from = kwargs['resume_from']
self.dbpool = kwargs['dbpool']
self.piece_size = piece_size(self.file_size)
self.total_pieces = int(
math.ceil(
self.file_size *
1000.0 *
1000.0 /
self.piece_size))
self.start_piece = self.resume_from
self.last_piece = self.total_pieces
self.last_piece_size = self.file_size * 1000.0 * \
1000.0 - ((self.total_pieces - 1) * self.piece_size)
self.blocks_per_piece = int(self.piece_size / CHUNK_SIZE)
self.blocks_per_last_piece = int(
math.ceil(
self.last_piece_size /
CHUNK_SIZE))
self.download_status = 0
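# When resuming, estimate how many bytes are already on disk from the starting
# piece index so the progress bar begins at the right offset (file_size appears
# to be expressed in megabytes, hence the repeated * 1000.0 * 1000.0 scaling).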
if self.start_piece != 0 and self.start_piece != (self.last_piece - 1):
self.download_status = (self.start_piece) * self.piece_size
elif self.start_piece == self.last_piece - 1 and self.start_piece != 0:
self.download_status = (
self.start_piece - 1) * self.last_piece_size
self.bar = progressbar.ProgressBar(
maxval=int(self.file_size * 1000.0 * 1000.0),
widgets=[
progressbar.Bar(
'=',
'[',
']'),
' ',
progressbar.Percentage(),
' ',
progressbar.Timer()]).start()
def connectAnotherPeer(self, connector, reason):
self.peers_list.remove(connector.host)
if len(self.peers_list) != 0:
# recompute the starting piece and start download from there
self.start_piece = int(math.floor(
(self.download_status / (self.file_size * 1000.0 * 1000.0)) * self.total_pieces))
# print 'now the starting piece is {0}'.format(self.start_piece)
connector.host = self.peers_list[0]
connector.connect()
else:
print 'out of peers'
def clientConnectionLost(self, connector, reason):
# self.reconnect(connector, reason)
# print FileDownloadFactory.__name__, ': closing connections'
pass
def clientConnectionFailed(self, connector, reason):
self.connectAnotherPeer(connector, reason)
def buildProtocol(self, addr):
return FileDownloadProtocol(self)
class DownloadManagerProtocol(BaseProtocol):
def __init__(self, factory):
self.factory = factory
self.delimiter = '\r'
self.special_handler = None
self.buff = ''
self.event_handlers = {
HASH_IDENTITY_RESPONSE: self._build_new_files_folders
}
def connectionMade(self):
get_file_identity_msg = bake(
GET_HASH_IDENTITY,
checksum=self.factory.checksum)
self.sendLine(get_file_identity_msg)
def serviceMessage(self, data):
key, value = unbake(message=data)
self.event_handlers[key](value)
def bake_client_message(self, msg):
msg_to_client = bake(
FILE_TO_BE_DOWNLOADED,
message=msg)
self.factory.client_connection.sendLine(msg_to_client)
self.factory.client_connection.transport.loseConnection()
@defer.inlineCallbacks
def _build_new_files_folders(self, response):
self.transport.loseConnection()
client_response = {}
meta_info = response['file_structure_response']
if meta_info['isFile']:
filesize = meta_info['size']
# file_root_hash = meta_info['roothash']
file_checksum = self.factory.checksum
if meta_info['isWindows']:
filename = meta_info['filename'].rsplit('\\')[-1]
else:
filename = os.path.basename(meta_info['filename'])
filepath = os.path.join(self.factory.download_folder, filename)
# compare leader sent root hash and peer sent root hash
if self.factory.roothash == meta_info['roothash']:
client_response['isFile'] = True
client_response['filename'] = filepath
client_response['filesize'] = filesize
client_response['checksum'] = file_checksum
self.bake_client_message(client_response)
yield self.init_file(
filepath,
filesize,
file_checksum,
self.factory.roothash)
else:
seeder_directory_root = meta_info['rootDirectory']
is_windows = meta_info['isWindows']
if not is_windows:
client_directory_root = os.path.join(
self.factory.download_folder,
os.path.basename(seeder_directory_root))
else:
seeder_directory_root_basepath = seeder_directory_root.rsplit(
'\\')[-1]
client_directory_root = os.path.join(
self.factory.download_folder,
seeder_directory_root_basepath)
client_directory_root = os.path.realpath(client_directory_root)
# this list contains (final pathnames of files with respect to
# client path, size, hash)
client_files_to_create = []
if not os.path.isdir(client_directory_root):
os.mkdir(client_directory_root)
client_response['isFile'] = False
client_response['rootDirectory'] = client_directory_root
client_response['rootDirectoryChecksum'] = self.factory.checksum
client_response['files'] = [] # [(filename, size, checksum)]
# contains [( dirpath, filename, size, file_hash, roothash)]
files_list = meta_info['files']
for file_property in files_list:
parent_path, filename, size, file_checksum, file_root_hash = file_property
client_subdirectories_path = client_directory_root
relative_subdirectory = parent_path[
len(seeder_directory_root):]
if not is_windows:
subdirectories = relative_subdirectory.split(
'/') # add windows support
else:
subdirectories = relative_subdirectory.split(
'\\') # add windows support
for subdirectory in subdirectories:
client_directory_path = os.path.join(
client_subdirectories_path,
subdirectory)
if not os.path.isdir(client_directory_path):
os.mkdir(client_directory_path)
client_subdirectories_path = client_directory_path
absolute_file_path = os.path.join(
client_subdirectories_path,
filename)
client_files_to_create.append(
(absolute_file_path,
size,
file_checksum,
file_root_hash))
client_response['files'].append(
(absolute_file_path, size,
distribution on counts[i] trials with probability of
success probs[i].
"""
with ops.name_scope(name, "stateless_random_binomial",
[shape, seed, counts, probs]) as name:
shape = tensor_util.shape_tensor(shape)
probs = ops.convert_to_tensor(
probs, dtype_hint=dtypes.float32, name="probs")
counts = ops.convert_to_tensor(
counts, dtype_hint=probs.dtype, name="counts")
result = gen_stateless_random_ops.stateless_random_binomial(
shape=shape, seed=seed, counts=counts, probs=probs, dtype=output_dtype)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_gamma")
@dispatch.add_dispatch_support
def stateless_random_gamma(shape,
seed,
alpha,
beta=None,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a gamma distribution.
The generated values follow a gamma distribution with specified concentration
(`alpha`) and inverse scale (`beta`) parameters.
This is a stateless version of `tf.random.gamma`: if run twice with the same
seeds, it will produce the same pseudorandom numbers. The output is consistent
across multiple runs on the same hardware (and between CPU and GPU), but may
change between versions of TensorFlow or on non-CPU/GPU hardware.
A slight difference exists in the interpretation of the `shape` parameter
between `stateless_gamma` and `gamma`: in `gamma`, the `shape` is always
prepended to the shape of the broadcast of `alpha` with `beta`; whereas in
`stateless_gamma` the `shape` parameter must always encompass the shapes of
each of `alpha` and `beta` (which must broadcast together to match the
trailing dimensions of `shape`).
Note: Because internal calculations are done using `float64` and casting has
`floor` semantics, we must manually map zero outcomes to the smallest
possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This
means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise
should. This bias can only happen for small values of `alpha`, i.e.,
`alpha << 1` or large values of `beta`, i.e., `beta >> 1`.
The samples are differentiable w.r.t. alpha and beta.
The derivatives are computed using the approach described in
(Figurnov et al., 2018).
Example:
```python
samples = tf.random.stateless_gamma([10, 2], seed=[12, 34], alpha=[0.5, 1.5])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random.stateless_gamma([7, 5, 2], seed=[12, 34], alpha=[.5, 1.5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
alpha = tf.constant([[1.], [3.], [5.]])
beta = tf.constant([[3., 4.]])
samples = tf.random.stateless_gamma(
[30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta)
# samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.
with tf.GradientTape() as tape:
tape.watch([alpha, beta])
loss = tf.reduce_mean(tf.square(tf.random.stateless_gamma(
[30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta)))
dloss_dalpha, dloss_dbeta = tape.gradient(loss, [alpha, beta])
# unbiased stochastic derivatives of the loss function
alpha.shape == dloss_dalpha.shape # True
beta.shape == dloss_dbeta.shape # True
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
alpha: Tensor. The concentration parameter of the gamma distribution. Must
be broadcastable with `beta`, and broadcastable with the rightmost
dimensions of `shape`.
beta: Tensor. The inverse scale parameter of the gamma distribution. Must be
broadcastable with `alpha` and broadcastable with the rightmost dimensions
of `shape`.
dtype: Floating point dtype of `alpha`, `beta`, and the output.
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random gamma values.
For each i, each `samples[..., i]` is an independent draw from the gamma
distribution with concentration alpha[i] and scale beta[i].
"""
with ops.name_scope(name, "stateless_random_gamma",
[shape, seed, alpha, beta]) as name:
shape = tensor_util.shape_tensor(shape)
alpha = ops.convert_to_tensor(alpha, dtype=dtype, name="alpha")
beta = ops.convert_to_tensor(
beta if beta is not None else 1, name="beta", dtype=dtype)
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(alpha), array_ops.shape(beta))
alpha_broadcast = array_ops.broadcast_to(alpha, broadcast_shape)
result = math_ops.maximum(
np.finfo(alpha.dtype.as_numpy_dtype).tiny,
gen_stateless_random_ops.stateless_random_gamma_v2(
shape, seed=seed, alpha=alpha_broadcast) / beta)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_poisson")
@dispatch.add_dispatch_support
def stateless_random_poisson(shape,
seed,
lam,
dtype=dtypes.int32,
name=None):
"""Outputs deterministic pseudorandom values from a Poisson distribution.
The generated values follow a Poisson distribution with specified rate
parameter.
This is a stateless version of `tf.random.poisson`: if run twice with the same
seeds, it will produce the same pseudorandom numbers. The output is consistent
across multiple runs on the same hardware, but may change between versions of
TensorFlow or on non-CPU/GPU hardware.
A slight difference exists in the interpretation of the `shape` parameter
between `stateless_poisson` and `poisson`: in `poisson`, the `shape` is always
prepended to the shape of `lam`; whereas in `stateless_poisson` the shape of
`lam` must match the trailing dimensions of `shape`.
Example:
```python
samples = tf.random.stateless_poisson([10, 2], seed=[12, 34], lam=[5, 15])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random.stateless_poisson([7, 5, 2], seed=[12, 34], lam=[5, 15])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
rate = tf.constant([[1.], [3.], [5.]])
samples = tf.random.stateless_poisson([30, 3, 1], seed=[12, 34], lam=rate)
# samples has shape [30, 3, 1], with 30 samples each of 3x1 distributions.
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
lam: Tensor. The rate parameter "lambda" of the Poisson distribution. Shape
must match the rightmost dimensions of `shape`.
dtype: Dtype of the samples (int or float dtypes are permissible, as samples
are discrete). Default: int32.
name: A name for the operation (optional).
Returns:
samples: A Tensor of the specified shape filled with random Poisson values.
For each i, each `samples[..., i]` is an independent draw from the Poisson
distribution with rate `lam[i]`.
"""
with ops.name_scope(name, "stateless_random_poisson",
[shape, seed, lam]) as name:
shape = tensor_util.shape_tensor(shape)
result = gen_stateless_random_ops.stateless_random_poisson(
shape, seed=seed, lam=lam, dtype=dtype)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_normal")
@dispatch.add_dispatch_support
def stateless_random_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values from a normal distribution.
This is a stateless version of `tf.random.normal`: if run twice with the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
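Example:
```python
samples = tf.random.stateless_normal([2, 3], seed=[12, 34])
# Running this twice with seed=[12, 34] yields identical values (illustrative).
```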
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "stateless_random_normal",
[shape, seed, mean, stddev]) as name:
shape = tensor_util.shape_tensor(shape)
mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype)
result = math_ops.add(rnd * stddev, mean, name=name)
tensor_util.maybe_set_static_shape(result, shape)
return result
@tf_export("random.stateless_truncated_normal")
@dispatch.add_dispatch_support
def stateless_truncated_normal(shape,
seed,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
name=None):
"""Outputs deterministic pseudorandom values, truncated normally distributed.
This is a stateless version of `tf.random.truncated_normal`: if run twice with
the
same seeds, it will produce the same pseudorandom numbers. The output is
consistent across multiple runs on the same hardware (and between CPU
and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
hardware.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution.
#! /usr/bin/env python
#All Functions
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
#Select the Image Filename
#FILENAME = 'chessboard.png'
#FILENAME = 'chessboard.jpg'
#FILENAME = 'chessboard_skew.jpg'
#FILENAME = 'checkerboard2.png'
#FILENAME ='image.jpg'
#FILENAME = "lena1.png"
class TCOLORS:
GRAY = '\x1b[30m'
RED = '\x1b[31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
BLUE = '\x1b[34m'
PURPLE = '\x1b[35m'
CYAN = '\x1b[36m'
NORMAL = '\x1b[0m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
def rgb2gray(rgb):
'''
Read in an RGB image and return a Grayscale image in ndarray format
'''
# ITU-R BT.601 luma weights (the blue coefficient is 0.114, not 0.144)
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def gen_image_derivatives(gray_image):
'''
Pass in a grayscale image in ndarray format
Returns tuple of image derivatives
(image dx, image dy)
'''
height = len(gray_image)
width = len(gray_image[0])
#print "Width: %d" % width
#print "Height: %d" % height
#Find the derivative of an image
image_x = np.ndarray(shape=(height, width), dtype=np.int32)
image_y = np.ndarray(shape=(height, width), dtype=np.int32)
for y in range(0, height - 1):
for x in range(0, width - 1):
#Get rid of edge cases
if (x < 1) or (y < 1) or (x > width - 1) or (y > height - 1):
image_x[y, x] = 0
image_y[y, x] = 0
continue
#X Data
#Row Before
image_x[y, x] = gray_image[y - 1, x + 1] - gray_image[y - 1, x - 1]
#Current Row
image_x[y, x] += gray_image[y , x + 1] - gray_image[y , x - 1]
#Row After
image_x[y, x] += gray_image[y + 1, x + 1] - gray_image[y + 1, x - 1]
image_x[y, x] = int(abs(image_x[y, x]) / 3)
#Y Data
#Column Above
image_y[y, x] = gray_image[y + 1, x - 1] - gray_image[y - 1, x - 1]
#Current Column
image_y[y, x] += gray_image[y + 1, x ] - gray_image[y - 1, x ]
#Column Below
image_y[y, x] += gray_image[y + 1, x + 1] - gray_image[y - 1, x + 1]
image_y[y,x] = int (abs(image_y[y, x]) / 3)
return (image_x, image_y)
def gen_image_sobel(gray_image):
'''
Pass in a grayscale image in ndarray format
Returns tuple of image derivatives
(image dx, image dy)
'''
height = len(gray_image)
width = len(gray_image[0])
#print "Width: %d" % width
#print "Height: %d" % height
#Find the derivative of an image
image_x = np.ndarray(shape=(height, width), dtype=np.int32)
image_y = np.ndarray(shape=(height, width), dtype=np.int32)
for y in range(0, height - 1):
for x in range(0, width - 1):
#Get rid of edge cases
if (x < 1) or (y < 1) or (x > width - 1) or (y > height - 1):
image_x[y, x] = 0
image_y[y, x] = 0
continue
#X Data
#Row Before
image_x[y, x] = gray_image[y - 1, x + 1] - gray_image[y - 1, x - 1]
#Current Row
image_x[y, x] += (gray_image[y , x + 1] * 2) - (gray_image[y , x - 1] * 2)
#Row After
image_x[y, x] += gray_image[y + 1, x + 1] - gray_image[y + 1, x - 1]
image_x[y, x] = int(abs(image_x[y, x]) / 4)
#Y Data
#Column Above
image_y[y, x] = gray_image[y + 1, x - 1] - gray_image[y - 1, x - 1]
#Current Column
image_y[y, x] += (gray_image[y + 1, x ] * 2) - (gray_image[y - 1, x ] * 2)
#Column Below
image_y[y, x] += gray_image[y + 1, x + 1] - gray_image[y - 1, x + 1]
image_y[y,x] = int (abs(image_y[y, x]) / 4)
return (image_x, image_y)
GAUSSIAN_BITRANGE=18
GAUSSIAN_DIST=1 #Mapping from gaussian location to array position (1 = 1:1)
def gen_deviation_array(sigma, length=5):
midpoint = int(length / 2)
sd = []
for i in range(length):
d = abs((i - midpoint))
sd.append((1 / (math.sqrt(2 * math.pi) * sigma)) * math.exp(-(d**2)/(2 * sigma**2)))
#Normalize all the values
#scale_value = 1 / sd[midpoint]
#for i in range(length):
# sd[i] = sd[i] * scale_value
return sd
def gen_2d_deviation_array(sigma, length=5):
midpoint = int(length / 2)
sd = np.ndarray(shape=(length, length), dtype=np.float)
for y in range (length):
for x in range(length):
x_abs = abs((x - midpoint))
y_abs = abs((y - midpoint))
sd[y, x] = ((1 / (math.sqrt(2 * math.pi))) * math.exp(-(x_abs**2 + y_abs**2)/(2*sigma**2)))
scale_value = 1 / sd[midpoint, midpoint]
for y in range (length):
for x in range(length):
sd[y, x] = sd[y, x] * scale_value
return sd
def convert_gaussian_to_digital_array(gaussian_array, bitrange = GAUSSIAN_BITRANGE, dist = GAUSSIAN_DIST):
'''
Generate an integer representation of a guassian array distribution,
Takes in a floating point gaussian array as well as the bitrange to map to
and the dist array element is (usually 1 for 1 to 1 pixel mapping)
'''
maxvalue = (2 ** bitrange) - 1
midpoint = int(len(gaussian_array) / 2)
digital_array = [0] * len(gaussian_array)
for i in range(len(gaussian_array)):
digital_array[i] = int(maxvalue * gaussian_array[i])
if digital_array[i] < 0:
digital_array[i] = 0
return digital_array
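# Illustrative mapping: with bitrange=8 a gaussian weight of 1.0 becomes 255
# and a weight of 0.5 becomes 127 (values below zero are clamped to 0).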
def generate_matrix_values(ix, iy, ga):
'''
ix = derivative of image WRT X
iy = derivative of image WRT Y
ga = Gaussian Array
Generate the following arrays
Sum_(u,v)<Ix^2 * W(u,v)>
Sum_(u,v)<IxIy * W(u,v)>
Sum_(u,v)<Iy^2 * W(u,v)>
'''
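# a_out, bc_out and d_out are the entries of the 2x2 second-moment (structure)
# matrix M = [[A, B], [B, D]] accumulated under the Gaussian window ga; the
# generate_mc* functions below use det(M) and trace(M) to score corners.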
width = len(ix[0])
height = len(ix)
win_height = ga.shape[0]
win_width = ga.shape[1]
win_x_midpoint = int(win_width / 2)
win_y_midpoint = int(win_height / 2)
#print ("window width: %d" % win_width)
#print ("window height: %d" % win_height)
#print ("window midpoint x: %d" % win_x_midpoint)
#print ("window midpoint y: %d" % win_y_midpoint)
a_out = np.ndarray(shape=(height, width), dtype=np.int32)
bc_out = np.ndarray(shape=(height, width), dtype=np.int32)
d_out = np.ndarray(shape=(height, width), dtype=np.int32)
for y in range(0, height):
for x in range(0, width):
#Get rid of edge cases
a_out[y, x] = 0
bc_out[y, x] = 0
d_out[y, x] = 0
if (x < win_x_midpoint) or (y < win_y_midpoint) or (x > width - win_x_midpoint - 1) or (y > height - win_y_midpoint - 1):
continue
for wy in range (win_height):
for wx in range(win_width):
#X Values
#pos = win_midpoint - i
xpos = wx - win_x_midpoint
ypos = wy - win_y_midpoint
a_out[y, x] += float(ix[y + ypos, x + xpos] * ix[y + ypos, x + xpos] * ga[wx, wy])
bc_out[y, x] += float(ix[y + ypos, x + xpos] * iy[y + ypos, x + xpos] * ga[wx, wy])
d_out[y, x] += float(iy[y + ypos, x + xpos] * iy[y + ypos, x + xpos] * ga[wx, wy])
return (a_out, bc_out, d_out)
def generate_mc_debug(a, bc, d, k, threshold):
'''
Return an array of corners that are found using the 'k' value
Arguments:
a: Sum_(u,v) Ix(u,v)^2 * W(u,v)
bc: Sum_(u,v) Ix(u,v)Iy(u,v) * W(u,v)
d: Sum_(u,v) Iy(u,v)^2 * W(u,v)
k: Scaling value of corners to detect
threshold: value at which a 'good' corner is detected
Return a new image with only the corners highlighted and the intermediately
generated images
'''
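# Harris corner response: R = det(M) - k * trace(M)^2,
# with det(M) = A*D - BC^2 and trace(M) = A + D (computed per pixel below).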
width = len(a[0])
height = len(a)
rarray = np.ndarray(shape=(height, width))
corners = np.ndarray(shape=(height, width))
det = np.ndarray(shape=(height, width))
trc = np.ndarray(shape=(height, width))
max_r = 0
for y in range(0, height):
for x in range(0, width):
r = float( ((a[y, x] * d[y,x]) - (bc[y,x] * bc[y,x])) - k * ((a[y,x] + d[y,x]) ** 2))
det[y, x] = ((a[y, x] * d[y,x]) - (bc[y,x] * bc[y,x]))
trc[y, x] = k * ((a[y,x] + d[y,x]) ** 2)
if r > max_r:
max_r = r
if r < 0:
corners[y, x] = 0
elif r > threshold:
corners[y, x] = 255
else:
corners[y, x] = 0
print "Max R: %d" % max_r
return (corners, det, trc)
def generate_mc(a, bc, d, k, threshold):
'''
Return an array of corners that are found using the 'k' value
Arguments:
a: Sum_(u,v) Ix(u,v)^2 * W(u,v)
bc: Sum_(u,v) Ix(u,v)Iy(u,v) * W(u,v)
d: Sum_(u,v) Iy(u,v)^2 * W(u,v)
k: Scaling value of corners to detect
threshold: value at which a 'good' corner is detected
Return a new image with only the corners highlighted
'''
width = len(a[0])
height = len(a)
rarray = np.ndarray(shape=(height, width))
corners = np.ndarray(shape=(height, width))
max_r = 0
for y in range(0, height):
for x in range(0, width):
r = float( ((a[y, x] * d[y,x]) - (bc[y,x] * bc[y,x])) - k * ((a[y,x] + d[y,x]) ** 2))
if r > max_r:
max_r = r
if r < 0:
corners[y, x] = 0
elif r > threshold:
corners[y, x] = 255
else:
corners[y, x] = 0
return corners
# file: anhima/tree.py
# -*- coding: utf-8 -*-
"""
This module provides some facilities for constructing and plotting trees. It
is mostly a wrapper around a very limited subset of functions from the R
`ape` package (Analyses of Phylogenetics and Evolution).
R must be installed, the `ape` R package must be installed, and the Python
package ``rpy2`` must be installed, e.g.::
$ apt-get install r-base
$ pip install rpy2
$ R
> install.packages("ape")
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/tree.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# standard library dependencies
import tempfile
import logging
# third party dependencies
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
logger = logging.getLogger(__name__)
debug = logging.debug
_r_initialised = False
r = None
ro = None
grdevices = None
ape = None
def _init_r():
"""Private function to initialise R, only executed when needed.
"""
global _r_initialised
global r
global ro
global grdevices
global ape
if not _r_initialised:
import rpy2.robjects as ro # noqa
from rpy2.robjects import r
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri as numpy2ri
numpy2ri.activate()
grdevices = importr('grDevices')
ape = importr(
'ape',
robject_translations={
'delta.plot': 'delta_dot_plot',
'dist.dna': 'dist_dot_dna',
'dist.nodes': 'dist_dot_nodes',
'node.depth': 'node_dot_depth',
'node.depth.edgelength': 'node_dot_depth_dot_edgelength',
'node.height': 'node_dot_height',
'node.height.clado': 'node_dot_height_dot_clado',
'prop.part': 'prop_dot_part',
}
)
# Define custom R functions to help with coloring tree edges by
# population. These functions were written by <NAME>
# <<EMAIL>> at the Wellcome Trust Sanger Institute.
r("""
library(ape)
######################################################################################################################
#' Computes the number of leaves of each group that hang from each branch.
#' @param phylotree A tree of class phylo.
#' @param labelgroups A vector with the group of the tip labels (named with the labels).
#' @return A named matrix with the membership counts for each interior edge of the tree.
######################################################################################################################
computeEdgeGroupCounts <- function(phylotree, labelgroups) {
labels <- phylotree$tip.label
num_tips <- length(labels)
edge_names <- unique(sort(c(phylotree$edge)))
# This matrix will keep track of the group counts for each edge.
edge_group_counts <- matrix(0, nrow=length(edge_names), ncol=length(unique(sort(labelgroups))))
rownames(edge_group_counts) <- edge_names
colnames(edge_group_counts) <- unique(labelgroups)
# Init the leaf branches.
sapply(1:num_tips, function(l) {
edge_group_counts[as.character(l), as.character(labelgroups[phylotree$tip.label[l]])] <<- 1
})
# Sort edges by the value of the descendent
# The first segment will contain the leaves whereas the second the branches (closer to leaves first).
# We need to do this because leaves are numbered 1:num_tips and the branches CLOSER to the leaves
# with higher numbers.
edges <- phylotree$edge[order(phylotree$edge[,2]),]
branches <- edges[num_tips:nrow(edges),]
edges[num_tips:nrow(edges),] <- branches[order(branches[,1],decreasing=T),]
invisible(apply(edges, 1, function(edge) {
# Check if we are connecting a leaf.
if(edge[2] <= num_tips) {
e <- as.character(edge[1])
g <- as.character(labelgroups[phylotree$tip.label[edge[2]]])
edge_group_counts[e,g] <<- edge_group_counts[e,g] + 1
}
else {
e1 <- as.character(edge[1])
e2 <- as.character(edge[2])
edge_group_counts[e1,] <<- edge_group_counts[e1,] + edge_group_counts[e2,]
}
}))
return(edge_group_counts)
}
######################################################################################################################
#' Assigns the color of the majority group (hanging from) each branch.
#' @param phylotree A tree of class phylo.
#' @param edge_group_counts A named matrix with the group counts for each branch.
#' @param groupcolors A named vector with the color of each group.
#' @param equality_color The color to be used if there is no majority group.
#' @return A vector with the colors to be used with the tree branches.
######################################################################################################################
assignMajorityGroupColorToEdges <- function(phylotree, edge_group_counts, groupcolors, equality_color="gray") {
edge_colors <- apply(phylotree$edge, 1, function(branch) {
e <- as.character(branch[2])
major_group_index <- which.max(edge_group_counts[e,])
if(all(edge_group_counts[e,] == edge_group_counts[e,major_group_index]))
return(equality_color)
else
return(groupcolors[colnames(edge_group_counts)[major_group_index]])
})
return(edge_colors)
}
""") # noqa
_r_initialised = True
def nj(dist_square, labels=None):
"""Wrapper for the `ape` ``nj`` function, which performs the
neighbor-joining tree estimation of Saitou and Nei (1987).
Parameters
----------
dist_square : array_like, shape (`n_samples`, `n_samples`)
A pairwise distance matrix in square form.
labels : sequence of strings, optional
A sequence of strings to label the tips of the tree. Must be in the
same order as rows of the distance matrix.
Returns
-------
An R object of class "phylo".
See Also
--------
anhima.dist.pairwise_distance
"""
# setup R
_init_r()
# normalise inputs
dist_square = np.asarray(dist_square)
assert dist_square.ndim == 2
assert dist_square.shape[0] == dist_square.shape[1]
# convert distance matrix to R
m = ro.vectors.Matrix(dist_square)
# assign row and column labels
if labels:
# map all strings to str
labels = [str(l) for l in labels]
s = ro.StrVector(labels)
m.rownames = s
m.colnames = s
# build the tree
tree = ape.nj(m)
return tree
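# Illustrative usage (assumes a symmetric distance matrix with a zero diagonal):
#   import numpy as np
#   d = np.array([[0.0, 0.1, 0.2], [0.1, 0.0, 0.3], [0.2, 0.3, 0.0]])
#   tree = nj(d, labels=['sampleA', 'sampleB', 'sampleC'])
#   plot_phylo(tree)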
def bionj(dist_square, labels=None):
"""Wrapper for the `ape` ``bionj`` function, which performs the BIONJ
algorithm of Gascuel (1997).
Parameters
----------
dist_square : array_like, shape (`n_samples`, `n_samples`)
A pairwise distance matrix in square form.
labels : sequence of strings, optional
A sequence of strings to label the tips of the tree. Must be in the
same order as rows of the distance matrix.
Returns
-------
An R object of class "phylo".
See Also
--------
anhima.dist.pairwise_distance
"""
# setup R
_init_r()
# normalise inputs
dist_square = np.asarray(dist_square)
assert dist_square.ndim == 2
assert dist_square.shape[0] == dist_square.shape[1]
# convert distance matrix to R
m = ro.vectors.Matrix(dist_square)
# assign row and column labels
if labels:
# map all strings to str
labels = [str(l) for l in labels]
s = ro.StrVector(labels)
m.rownames = s
m.colnames = s
# build the tree
tree = ape.bionj(m)
return tree
def plot_phylo(tree, plot_kwargs=None, add_scale_bar=None,
filename=None, width=None, height=None, units=None, res=None,
pointsize=None, bg=None, ax=None, imshow_kwargs=None):
"""Wrapper for the `ape` ``plot.phylo`` function, which plots phylogenetic
trees. Plotting will use the R `png` graphics device.
Parameters
----------
tree : R object of class "phylo"
The tree to plot.
plot_kwargs : dict-like, optional
A dictionary of keyword arguments that will be passed through to the
`ape` function ``plot.phylo()``. See the documentation for the `ape`
package for a full list of supported arguments.
add_scale_bar : dict-like, optional
A dictionary of keyword arguments that will be passed through to the
`ape` function ``add.scale.bar()``. See the documentation for the
`ape` package for a full list of supported arguments.
filename : string, optional
File path for the generated PNG image. If None, a temporary file will
be used.
width : int or float, optional
Width of the plot in `units`.
height : int or float, optional
Height of the plot in `units`.
units : {'px', 'in', 'cm', 'mm'}, optional
The units in which 'height' and 'width' are given. Can be 'px' (pixels,
the default), 'in' (inches), 'cm' or 'mm'.
res : int, optional
The nominal resolution in ppi which will be recorded in the bitmap
file, if a positive integer. Also used for 'units' other than the
default, and to convert points to pixels.
pointsize : float, optional
The default pointsize of plotted text, interpreted as big points (
1/72 inch) at 'res' ppi.
bg : color, optional
The background color.
ax : axes, optional
The axes on which to draw. If not provided, a new figure will be
created.
imshow_kwargs : dict-like, optional
Additional keyword arguments passed through to `imshow()`.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
# setup R
_init_r()
# setup image file
if filename is None:
tmp = tempfile.NamedTemporaryFile(suffix='.png')
filename = tmp.name
# initialise PNG device
png_arg_names = 'width', 'height', 'units', 'res', 'pointsize', 'bg'
png_args = dict()
for n in png_arg_names:
v = locals()[n]
if v is not None:
png_args[n] = v
debug(filename)
debug(png_args)
grdevices.png(filename, **png_args)
# plot
if plot_kwargs is None:
plot_kwargs = dict()
# adapt values for certain properties
for k in 'tip.color', 'edge.color':
if k in plot_kwargs:
v = plot_kwargs[k]
if isinstance(v, (list, tuple, np.ndarray)):
plot_kwargs[k] = ro.StrVector(v)
debug(plot_kwargs)
ape.plot_phylo(tree, **plot_kwargs)
# add scale bar
if add_scale_bar is not None:
ape.add_scale_bar(**add_scale_bar)
# finalise PNG device
grdevices.dev_off()
# read in PNG for matplotlib plotting
png = mpimg.imread(filename)
# set up axes for matplotlib plotting
if ax is None:
# try to make the figure exactly the right size for image native
# resolution
# note: imread returns an array of shape (height, width[, channels])
pxh, pxw = png.shape[:2]
dpi = plt.rcParams['savefig.dpi']
w, h = pxw/dpi, pxh/dpi
fig, ax = plt.subplots(figsize=(w, h))
# no margin
fig.subplots_adjust(0, 0, 1, 1, hspace=0, wspace=0)
if imshow_kwargs is None:
imshow_kwargs = dict()
imshow_kwargs.setdefault('aspect', 'equal')
imshow_kwargs.setdefault('interpolation', 'none')
ax.imshow(png, **imshow_kwargs)
ax.set_axis_off()
return ax
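# A minimal usage sketch (hypothetical variable names, assuming a square
# pairwise distance matrix `dist` and matching sample labels `samples`):
#
#     tree = nj(dist, labels=samples)
#     ax = plot_phylo(tree,
#                     plot_kwargs={'type': 'unrooted', 'cex': .5},
#                     add_scale_bar=dict(),
#                     width=800, height=800, res=150)
#
# The plot_kwargs values are passed straight through to R's plot.phylo(),
# so any option documented by the `ape` package (e.g. 'type', 'cex',
# 'tip.color') can be used here.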
def write_tree(tree, filename=None, **kwargs):
"""
Wrapper for the `ape` ``write.tree`` function, which writes in a file a
tree in parenthetic format using the Newick (also known as New Hampshire)
# Repository: lucasondel/amdtk
"""Implementation of the training agorithms for the phone loop."""
import os
import time
import pickle
import shutil
import random
from ipyparallel.util import interactive
import numpy as np
def acc_stats(stats_list):
"""Accumulate a list of sufficient statistics.
Parameters
----------
stats_list : list
List of sufficient statistics.
Returns
-------
stats : dict
Accumulated sufficient statistics.
"""
new_stats = {}
for stats in stats_list:
for key1, model_stats in stats.items():
if key1 not in new_stats:
new_stats[key1] = {}
for key2, value in model_stats.items():
try:
new_stats[key1][key2] += value
except KeyError:
new_stats[key1][key2] = value
return new_stats
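# Illustrative sketch (hypothetical values): acc_stats merges nested
# {model_key: {stat_name: value}} dictionaries by summing the leaves, e.g.
#
#     s1 = {-1: {'E_log_X': -10.0, 'N': 100}}
#     s2 = {-1: {'E_log_X': -12.0, 'N': 120}}
#     acc_stats([s1, s2])  # -> {-1: {'E_log_X': -22.0, 'N': 220}}
#
# add_stats() and remove_stats() below operate on the same layout.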
def add_stats(stats, a_stats):
"""Add a subset of the statistics.
Parameters
----------
stats : dictionary
Accumulated statistics.
a_stats : dictionary
Statistics to add to the total accumulated statistics.
"""
for key1, model_stats in a_stats.items():
if key1 not in stats:
stats[key1] = model_stats
else:
for key2, value in model_stats.items():
stats[key1][key2] += value
def remove_stats(stats, rm_stats):
"""Remove a subset of the statistics.
Parameters
----------
stats : dictionary
Accumulated statistics.
rm_stats : dictionary
Statistics to remove from the total accumulated statistics.
"""
for key1, model_stats in rm_stats.items():
for key2, value in model_stats.items():
stats[key1][key2] -= value
@interactive
def log_predictive(fea_file):
"""Lower-bound of the predictive distribution.
Parameters
----------
fea_file : str
Path to a features file (HTK format).
Returns
-------
stats : dict
Dictionary with the log-predictive probability.
"""
# pylint: disable=too-many-locals
# Because this function is meant to be executed in a
# separate thread, we prefer to reduce the number of
# function calls to simplify the dependencies.
# pylint: disable=global-variable-not-assigned
# pylint: disable=undefined-variable
# The values of these global variables will be pushed
# to the workers dynamically.
global MODEL, TEMP_DIR
# pylint: disable=redefined-outer-name
# pylint: disable=reimported
# These imports are done on the remote workers.
import os
import pickle
# Extract the key of the utterance.
basename = os.path.basename(fea_file)
key, ext = os.path.splitext(basename)
if '[' in ext:
idx = ext.index('[')
key += ext[idx:]
# Load the features.
data = read_htk(fea_file)
expected_llh, unit_stats, state_resps, comp_resps = \
MODEL.expected_log_likelihood(data)
# Add the normalizer to the stats to compute the
# lower bound.
stats = {}
stats[-1] = {
'E_log_X': expected_llh,
'N': data.shape[0]
}
# Store the stats.
out_path = os.path.join(TEMP_DIR, key)
with open(out_path, 'wb') as file_obj:
pickle.dump(stats, file_obj)
return out_path
@interactive
def std_exp(fea_file):
"""E-Step of the standard Variational Bayes.
Parameters
----------
fea_file : str
Path to a features file (HTK format).
Returns
-------
stats : dict
Dictionary of statistics.
"""
# pylint: disable=too-many-locals
# Because this function is meant to be executed in a
# separate thread, we prefer to reduce the number of
# function calls to simplify the dependencies.
# pylint: disable=global-variable-not-assigned
# pylint: disable=undefined-variable
# The values of these global variables will be pushed
# to the workers dynamically.
global MODEL, TEMP_DIR, ALIGNMENTS
# pylint: disable=redefined-outer-name
# pylint: disable=reimported
# These imports are done on the remote workers.
import os
import pickle
# Extract the key of the utterance.
basename = os.path.basename(fea_file)
key, ext = os.path.splitext(basename)
if '[' in ext:
idx = ext.index('[')
key += ext[idx:]
# Load the features.
data = read_htk(fea_file)
# Compute the responsibilities per component.
if ALIGNMENTS is not None:
ali = ALIGNMENTS[fea_file]
else:
ali = None
expected_llh, unit_stats, state_resps, comp_resps = \
MODEL.expected_log_likelihood(data)
# Get the sufficient statistics of the model given the
# responsibilities.
stats = MODEL.get_stats(data, unit_stats, state_resps,
comp_resps)
# Add the normalizer to the stats to compute the
# lower bound.
stats[-1] = {
'E_log_X': expected_llh,
'N': data.shape[0]
}
# Store the stats.
out_path = os.path.join(TEMP_DIR, key)
with open(out_path, 'wb') as file_obj:
pickle.dump(stats, file_obj)
return out_path
@interactive
def count_frames(fea_file):
"""Count the number of frames in the features file.
Parameters
----------
fea_file : str
Path to the features file.
Returns
-------
count : int
Number of frames.
"""
return read_htk(fea_file).shape[0]
class StandardVariationalBayes(object):
"""Standard mean-field Variational Bayes training of the
phone loop model.
"""
def __init__(self, fealist, dview, train_args, tmpdir, alignments=None,
callback=None):
"""
Parameters
----------
fealist : list
List of feature files.
dview : object
Remote client objects to parallelize the training.
train_args : dict
Training specific arguments.
tmpdir : str
Path to the directory where to store temporary results.
alignments : MLF data
Unit level alignments (optional).
callback : function
Function called after each batch/epoch.
"""
self.fealist = fealist
self.dview = dview
self.alignments = alignments
self.callback = callback
self.dir_path = tmpdir
self.epochs = int(train_args.get('epochs', 1))
self.initial_pruning = int(train_args.get('initial_pruning_threshold',
500))
self.pruning = int(train_args.get('pruning_threshold', 100))
with self.dview.sync_imports():
from amdtk import read_htk
def run(self, model):
"""Run the Standard Variational Bayes training.
Parameters
----------
model : :class:`PhoneLoop`
Phone Loop model to train.
"""
start_time = time.time()
for epoch in range(self.epochs):
# Create a temporary directory.
self.temp_dir = os.path.join(self.dir_path, 'epoch' +
str(epoch + 1))
os.makedirs(self.temp_dir, exist_ok=True)
# Set the pruning threshold.
if epoch == 0:
model.pruning_threshold = self.initial_pruning
else:
model.pruning_threshold = self.pruning
# Perform one epoch of the training.
lower_bound = self.epoch(model)
# Monitor the convergence after each epoch.
if self.callback is not None:
args = {
'model': model,
'lower_bound': lower_bound,
'epoch': epoch + 1,
'tmpdir': self.dir_path,
'time': time.time() - start_time
}
self.callback(args)
def epoch(self, model):
"""Perform one epoch (i.e. processing the whole data set)
of the training.
Parameters
----------
model : :class:`PhoneLoop`
Phone Loop model to train.
"""
# Propagate the model to all the remote nodes.
self.dview.push({
'MODEL': model,
'TEMP_DIR': self.temp_dir,
'ALIGNMENTS': self.alignments
})
# Optimize the latent variables given the current values of the
# parameters of the posteriors for each feature file.
paths = self.dview.map_sync(std_exp, self.fealist)
# Accumulate the statistics into a single statistics object.
total_stats = None
for path in paths:
if total_stats is None:
with open(path, 'rb') as file_obj:
total_stats = pickle.load(file_obj)
else:
with open(path, 'rb') as file_obj:
add_stats(total_stats, pickle.load(file_obj))
# Compute the lower bound before the update.
lower_bound = (total_stats[-1]['E_log_X'] -
model.kl_divergence()) / total_stats[-1]['N']
# Second step of the coordinate ascent: optimize the posteriors
# parameters given the values of the latent variables.
model.update(total_stats)
return lower_bound
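# Minimal driver sketch (assumed setup, not part of this module): the trainer
# expects an ipyparallel DirectView and a list of HTK feature files, e.g.
#
#     from ipyparallel import Client
#     dview = Client()[:]
#     trainer = StandardVariationalBayes(fea_list, dview,
#                                        {'epochs': 5, 'pruning_threshold': 100},
#                                        '/tmp/vb_tmp')
#     trainer.run(phone_loop_model)
#
# fea_list and phone_loop_model are assumed to be created elsewhere (e.g. a
# PhoneLoop model built with amdtk).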
class StochasticVariationalBayes(object):
"""Stochastic (mean-field) Variational Bayes training of the
phone loop model.
"""
def __init__(self, fealist, dview, train_args, tmpdir, alignments=None,
callback=None):
"""
Parameters
----------
fealist : list
List of feature files.
dview : object
Remote client objects to parallelize the training.
train_args : dict
Training specific arguments.
tmpdir : str
Path to the directory where to store temporary results.
alignments : MLF data
Unit level alignments (optional).
callback : function
Function called after each batch/epoch.
"""
self.fealist = fealist
self.dview = dview
self.alignments = alignments
self.callback = callback
self.dir_path = tmpdir
self.epochs = int(train_args.get('epochs', 1))
self.batch_size = int(train_args.get('batch_size', 1))
self.initial_pruning = int(train_args.get('initial_pruning_threshold',
500))
self.pruning = int(train_args.get('pruning_threshold', 100))
self.forgetting_rate = float(train_args.get('forgetting_rate', .51))
self.delay = float(train_args.get('delay', 0))
self.scale = float(train_args.get('scale', 1))
with self.dview.sync_imports():
from amdtk import read_htk
# Count the total number of frames in the DB.
counts = self.dview.map_sync(count_frames, self.fealist)
self.n_frames = np.sum(counts)
def run(self, model):
"""Run the Stochastic Variational Bayes training.
Parameters
----------
model : :class:`PhoneLoop`
Phone Loop model to train.
"""
t = 0
start_time = time.time()
for epoch in range(self.epochs):
# Create a temporary directory.
self.temp_dir = os.path.join(self.dir_path, 'epoch' +
str(epoch + 1))
os.makedirs(self.temp_dir, exist_ok=True)
# Set the pruning threshold.
if epoch == 0:
model.pruning_threshold = self.initial_pruning
else:
model.pruning_threshold = self.pruning
# Perform one epoch of the training.
fealist = random.sample(self.fealist, len(self.fealist))
for i in range(0, len(self.fealist), self.batch_size):
# Time step of the gradient descent.
t += 1
# Compute the learning rate for the given time step.
lrate = self.scale * \
((self.delay + t)**(-self.forgetting_rate))
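# This is the usual stochastic-approximation step size
# rho_t = scale * (delay + t)^(-forgetting_rate); with
# forgetting_rate in (0.5, 1] the schedule satisfies the standard
# convergence conditions for stochastic variational inference.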
start = i
end = i + self.batch_size
lower_bound = self.batch(model,
fealist[start:end],
lrate)
# Monitor the convergence after each epoch.
if self.callback is not None:
args = {
'model': model,
'lower_bound': lower_bound,
'epoch': epoch + 1,
'batch': int(i / self.batch_size) + 1,
'n_batch': int(np.ceil(len(self.fealist) /
self.batch_size)),
'lrate': lrate,
'tmpdir': self.dir_path,
'time': time.time() - start_time
}
self.callback(args)
# Cleanup the resources allocated during the training.
self.cleanup()
def batch(self, model, batch_fea_list, lrate):
"""Perform one batch update.
Parameters
----------
model : :class:`PhoneLoop`
Phone Loop model to train.
batch_fea_list : list
| |
return_list:
continue
for provide in package.provides:
provide_name, provide_cmp, provide_version = split_name_with_versioning(provide)
if provide_name != dep_name:
continue
if dep_cmp == "":
return_list.append(package)
elif (provide_cmp == "=" or provide_cmp == "==") and version_comparison(provide_version, dep_cmp,
dep_version):
return_list.append(package)
elif (provide_cmp == "") and version_comparison(package.version, dep_cmp, dep_version):
return_list.append(package)
# https://github.com/polygamma/aurman/issues/67
elif (provide_cmp == "") and Package.optimistic_versioning:
return_list.append(package)
return return_list
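# Example of the matching above (hypothetical packages): a dependency string
# such as "python>=3.6" is split by split_name_with_versioning() into
# ("python", ">=", "3.6"); a package providing "python=3.8" then satisfies it
# because version_comparison("3.8", ">=", "3.6") holds.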
def conflicting_with(self, package: 'Package') -> List['Package']:
"""
Returns the packages conflicting with "package"
:param package: The package to check for conflicts with
:return: List containing the conflicting packages
"""
return_list = []
if package.name in self.all_packages_dict:
return_list.append(self.all_packages_dict[package.name])
for conflict in package.conflicts:
for conflicting_package in self.provided_by(conflict):
if conflicting_package not in return_list:
return_list.append(conflicting_package)
provides = list(package.provides)
for providing in provides[:]:
prov_name, prov_cmp, prov_version = split_name_with_versioning(providing)
if prov_name == package.name:
provides.remove(providing)
provides.append("{}={}".format(package.name, package.version))
for providing in provides:
prov_name, prov_cmp, prov_version = split_name_with_versioning(providing)
if prov_name in self.conflicts_dict:
possible_conflict_packages = self.conflicts_dict[prov_name]
for possible_conflict_package in possible_conflict_packages:
if possible_conflict_package in return_list:
continue
for conflict in possible_conflict_package.conflicts:
conflict_name, conflict_cmp, conflict_version = split_name_with_versioning(conflict)
if conflict_name != prov_name:
continue
if conflict_cmp == "":
return_list.append(possible_conflict_package)
elif (prov_cmp == "=" or prov_cmp == "==") \
and version_comparison(prov_version, conflict_cmp, conflict_version):
return_list.append(possible_conflict_package)
return return_list
def append_packages_by_name(self, packages_names: Sequence[str]):
"""
Appends packages to this system by names.
:param packages_names: The names of the packages
"""
packages_names = set([strip_versioning_from_name(name) for name in packages_names])
packages_names_to_fetch = [name for name in packages_names if name not in self.all_packages_dict]
aur_names = packages_from_other_sources()[0]
for name in packages_names:
if name in packages_names_to_fetch:
continue
if name not in aur_names:
continue
package = self.all_packages_dict[name]
if package.type_of is not PossibleTypes.AUR_PACKAGE and package.type_of is not PossibleTypes.DEVEL_PACKAGE:
packages_names_to_fetch.append(name)
deleted_while_appending = False
while packages_names_to_fetch:
fetched_packages = Package.get_packages_from_aur(packages_names_to_fetch)
deps_of_the_fetched_packages = []
for package in fetched_packages:
deps_of_the_fetched_packages.extend(package.relevant_deps())
if package.name in self.all_packages_dict:
del self.all_packages_dict[package.name]
deleted_while_appending = True
self.append_packages(fetched_packages)
relevant_deps = list(set([strip_versioning_from_name(dep) for dep in deps_of_the_fetched_packages]))
packages_names_to_fetch = [dep for dep in relevant_deps if dep not in self.all_packages_dict]
if deleted_while_appending:
self.recreate_dicts()
def are_all_deps_fulfilled(self, package: 'Package', only_make_check: bool = False,
only_depends: bool = False, print_reason: bool = False) -> bool:
"""
if all deps of the package are fulfilled on the system
:param package: the package to check the deps of
:param only_make_check: True if one only wants make and check depends
:param only_depends: True if one only wants depends
:param print_reason: If the reason for failing should be printed
:return: True if the deps are fulfilled, False otherwise
"""
for dep in package.relevant_deps(only_make_check=only_make_check, only_depends=only_depends):
if not self.provided_by(dep):
if print_reason:
aurman_note(
"Dependency {} of package {} is not fulfilled".format(
Colors.BOLD(Colors.LIGHT_MAGENTA(dep)),
Colors.BOLD(Colors.LIGHT_MAGENTA(package.name))
)
)
return False
else:
return True
@staticmethod
def calc_install_chunks(packages_to_chunk: Sequence['Package']) -> List[List['Package']]:
"""
Calculates the chunks in which the given packages would be installed.
Repo packages are installed at once, AUR packages one by one.
e.g. AUR1, Repo1, Repo2, AUR2 yields: AUR1, Repo1 AND Repo2, AUR2
:param packages_to_chunk: The packages to calc the chunks of
:return: The packages in chunks
"""
current_list: List['Package'] = []
return_list: List[List['Package']] = [current_list]
for package in packages_to_chunk:
if current_list and (package.type_of is not PossibleTypes.REPO_PACKAGE
or current_list[0].type_of is not package.type_of):
current_list = [package]
return_list.append(current_list)
else:
current_list.append(package)
return return_list
def sanitize_user_input(self, user_input: Sequence[str]) -> Set[str]:
"""
Finds the names of the packages for the user_input
Needed since user may also specify the version of a package,
hence package1>1.0 may yield package1 since package1 has version 2.0
:param user_input: The user input in a sequence
:return: A set containing the packages names
"""
sanitized_names = set()
for name in user_input:
providers_for_name = self.provided_by(name)
if not providers_for_name:
aurman_error("No providers for {} found.".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))
raise InvalidInput("No providers for {} found.".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))
elif len(providers_for_name) == 1:
sanitized_names.add(providers_for_name[0].name)
# more than one provider
else:
dep_providers_names = [package.name for package in providers_for_name]
dep_name = strip_versioning_from_name(name)
# name matches one of the providers names
if dep_name in dep_providers_names:
sanitized_names.add(dep_name)
else:
aurman_note(
"We found multiple providers for {}\nChoose one by entering the corresponding number.".format(
Colors.BOLD(Colors.LIGHT_MAGENTA(name))
)
)
while True:
for i in range(0, len(providers_for_name)):
print(
"Number {}: {}".format(i + 1, self.repo_of_package(providers_for_name[i].name))
)
try:
user_input = int(input(aurman_question("Enter the number: ", False, False)))
if 1 <= user_input <= len(providers_for_name):
sanitized_names.add(providers_for_name[user_input - 1].name)
break
except ValueError:
print(aurman_error("That was not a valid choice!", False, False))
else:
print(aurman_error("That was not a valid choice!", False, False))
return sanitized_names
def hypothetical_append_packages_to_system(self, packages: List['Package'],
packages_names_print_reason: Iterable[str] = None,
print_way: bool = False) -> 'System':
"""
hypothetically appends packages to this system (only makes sense for the installed system)
and removes all conflicting packages and packages whose deps are not fulfilled anymore.
:param packages: the packages to append
:param packages_names_print_reason: print the uninstall reasons for packages
with names in this iterable
:param print_way: Prints the way of appending packages
:return: the new system
"""
new_system = System(list(self.all_packages_dict.values()))
if not packages:
return new_system
chunked_packages = System.calc_install_chunks(packages)
last_index = len(chunked_packages) - 1
for i, package_chunk in enumerate(chunked_packages):
# check if packages in chunk conflict each other
package_chunk_system = System(())
for package in package_chunk:
if package_chunk_system.conflicting_with(package):
break
package_chunk_system.append_packages((package,))
# no conflicts
else:
# calculate conflicting packages
conflicting_new_system_packages = []
for package in package_chunk:
# print why packages will be removed
if packages_names_print_reason is not None:
will_be_deleted = new_system.conflicting_with(package)
for package_to_be_removed in will_be_deleted:
if package_to_be_removed.name in packages_names_print_reason:
aurman_note(
"Package {} will be removed due to a conflict with {}".format(
Colors.BOLD(Colors.LIGHT_MAGENTA(package_to_be_removed.name)),
Colors.BOLD(Colors.LIGHT_MAGENTA(package.name))
)
)
# save the found packages for later deletion
conflicting_new_system_packages.extend(new_system.conflicting_with(package))
# remove duplicates
conflicting_new_system_packages = set(conflicting_new_system_packages)
# print what will be done
if print_way:
to_delete_packages_names = set()
to_upgrade_packages_names = set()
to_reinstall_packages_names = set()
packages_chunk_names = set([package.name for package in package_chunk])
for package in conflicting_new_system_packages:
if package.name not in packages_chunk_names:
to_delete_packages_names.add(package.name)
else:
old_package = new_system.all_packages_dict[package.name]
new_package = [
chunk_pack for chunk_pack in package_chunk if package.name == chunk_pack.name
][0]
if old_package.version == new_package.version:
to_reinstall_packages_names.add(package.name)
else:
to_upgrade_packages_names.add(package.name)
if to_upgrade_packages_names:
print(
" {} : {}".format(
Colors.BOLD(Colors.LIGHT_CYAN("Upgrade")),
", ".join(
[
Colors.BOLD(Colors.LIGHT_MAGENTA(name))
for name in sorted(to_upgrade_packages_names)
]
)
)
)
if to_reinstall_packages_names:
print(
" {} : {}".format(
Colors.BOLD(Colors.LIGHT_MAGENTA("Reinstall")),
", ".join(
[
Colors.BOLD(Colors.LIGHT_MAGENTA(name))
for name in sorted(to_reinstall_packages_names)
]
)
)
)
if to_delete_packages_names:
print(
" {} : {}".format(
Colors.BOLD(Colors.LIGHT_RED("Remove")),
", ".join(
[
Colors.BOLD(Colors.LIGHT_MAGENTA(name))
for name in sorted(to_delete_packages_names)
]
)
)
)
to_install_packages_names = packages_chunk_names - set.union(
*[to_upgrade_packages_names, to_reinstall_packages_names]
)
if to_install_packages_names:
print(
" {} : {}".format(
Colors.BOLD(Colors.LIGHT_GREEN("Install")),
", ".join(
[
Colors.BOLD(Colors.LIGHT_MAGENTA(name))
for name in sorted(to_install_packages_names)
]
)
)
)
# remove conflicting packages
if conflicting_new_system_packages:
deleted_packages = True
for package in conflicting_new_system_packages:
del new_system.all_packages_dict[package.name]
new_system = System(list(new_system.all_packages_dict.values()))
else:
deleted_packages = False
# append packages
new_system.append_packages(package_chunk)
# last exit brooklyn
# final check for sanity of the whole solution
# we do not accept mistakes!
if not deleted_packages and not (i == last_index):
continue
# delete packages whose deps are not fulfilled anymore
while True:
to_delete_packages = []
for package in new_system.all_packages_dict.values():
if packages_names_print_reason is not None and package.name in packages_names_print_reason:
if not new_system.are_all_deps_fulfilled(package, only_depends=True, print_reason=True):
to_delete_packages.append(package)
else:
if not new_system.are_all_deps_fulfilled(package, only_depends=True):
to_delete_packages.append(package)
if not to_delete_packages:
break
# print what will be done
if print_way:
packages_names_to_del = set([package.name for package in to_delete_packages])
print(
" {} : {}".format(
Colors.BOLD(Colors.LIGHT_RED("Remove")),
", ".join(
[
Colors.BOLD(Colors.LIGHT_MAGENTA(name))
for name in sorted(packages_names_to_del)
]
)
)
)
# actually delete the packages
for package in to_delete_packages:
del new_system.all_packages_dict[package.name]
new_system = System(list(new_system.all_packages_dict.values()))
return new_system
def differences_between_systems(self, other_systems: Sequence['System']) -> Tuple[
Tuple[Set['Package'], Set['Package']], List[Tuple[Set['Package'], Set['Package']]]]:
"""
Evaluates differences between this (.self) system and other systems.
:param other_systems: The other systems.
:return:
NOTICE: NONE of the following items
contains packages, which
are ALREADY INSTALLED on this
system AND will NOT be REMOVED!
In short: Do not show,
what does not change!
NOTICE FOR NOTICE: Packages with another
version are OTHER packages
hence UPDATES will be listed as
old package removed,
new package installed
LAST NOTICE: tl;dr read the sourcecode!
A tuple containing two items:
First item:
Tuple containing two items:
First item:
installed packages in comparison to this system,
which are installed in ALL other systems
Second item:
uninstalled packages in comparison to this system,
which are in ALL other systems
1),
(9, 19, 2, -3): (0, 1),
(9, 19, 2, -2): (0, 1),
(9, 19, 2, -1): (0, 0),
(9, 19, 2, 0): (0, 1),
(9, 19, 2, 1): (0, 1),
(9, 19, 2, 2): (0, 1),
(9, 19, 2, 3): (0, 1),
(9, 19, 2, 4): (0, 1),
(9, 19, 2, 5): (0, 1),
(9, 19, 3, -5): (0, 1),
(9, 19, 3, -4): (0, 1),
(9, 19, 3, -3): (0, 1),
(9, 19, 3, -2): (0, 1),
(9, 19, 3, -1): (0, 0),
(9, 19, 3, 0): (0, 1),
(9, 19, 3, 1): (0, 1),
(9, 19, 3, 2): (0, 1),
(9, 19, 3, 3): (0, 1),
(9, 19, 3, 4): (0, 1),
(9, 19, 3, 5): (0, 1),
(9, 19, 4, -5): (0, 1),
(9, 19, 4, -4): (0, 1),
(9, 19, 4, -3): (0, 1),
(9, 19, 4, -2): (0, 1),
(9, 19, 4, -1): (0, 0),
(9, 19, 4, 0): (0, 1),
(9, 19, 4, 1): (0, 1),
(9, 19, 4, 2): (0, 1),
(9, 19, 4, 3): (0, 1),
(9, 19, 4, 4): (0, 1),
(9, 19, 4, 5): (0, 1),
(9, 19, 5, -5): (0, 1),
(9, 19, 5, -4): (0, 1),
(9, 19, 5, -3): (0, 1),
(9, 19, 5, -2): (0, 1),
(9, 19, 5, -1): (0, 0),
(9, 19, 5, 0): (0, 1),
(9, 19, 5, 1): (0, 1),
(9, 19, 5, 2): (0, 1),
(9, 19, 5, 3): (0, 1),
(9, 19, 5, 4): (0, 1),
(9, 19, 5, 5): (0, 1),
(9, 20, -5, -5): (0, 1),
(9, 20, -5, -4): (0, 1),
(9, 20, -5, -3): (0, 1),
(9, 20, -5, -2): (0, 0),
(9, 20, -5, -1): (0, 1),
(9, 20, -5, 0): (0, 1),
(9, 20, -5, 1): (0, 1),
(9, 20, -5, 2): (0, 1),
(9, 20, -5, 3): (0, 1),
(9, 20, -5, 4): (0, 1),
(9, 20, -5, 5): (0, 1),
(9, 20, -4, -5): (0, 1),
(9, 20, -4, -4): (0, 1),
(9, 20, -4, -3): (0, 1),
(9, 20, -4, -2): (0, 0),
(9, 20, -4, -1): (0, 1),
(9, 20, -4, 0): (0, 1),
(9, 20, -4, 1): (0, 1),
(9, 20, -4, 2): (0, 1),
(9, 20, -4, 3): (1, 1),
(9, 20, -4, 4): (1, 1),
(9, 20, -4, 5): (1, 0),
(9, 20, -3, -5): (-1, 1),
(9, 20, -3, -4): (-1, 1),
(9, 20, -3, -3): (-1, 1),
(9, 20, -3, -2): (-1, 0),
(9, 20, -3, -1): (-1, 1),
(9, 20, -3, 0): (1, 1),
(9, 20, -3, 1): (1, 1),
(9, 20, -3, 2): (1, 1),
(9, 20, -3, 3): (0, 1),
(9, 20, -3, 4): (0, 1),
(9, 20, -3, 5): (0, 1),
(9, 20, -2, -5): (0, 1),
(9, 20, -2, -4): (0, 1),
(9, 20, -2, -3): (0, 1),
(9, 20, -2, -2): (0, 1),
(9, 20, -2, -1): (1, 1),
(9, 20, -2, 0): (1, 1),
(9, 20, -2, 1): (1, 1),
(9, 20, -2, 2): (1, 1),
(9, 20, -2, 3): (-1, 1),
(9, 20, -2, 4): (-1, 1),
(9, 20, -2, 5): (-1, 1),
(9, 20, -1, -5): (-1, 1),
(9, 20, -1, -4): (-1, 1),
(9, 20, -1, -3): (-1, 1),
(9, 20, -1, -2): (1, 1),
(9, 20, -1, -1): (1, 1),
(9, 20, -1, 0): (1, 1),
(9, 20, -1, 1): (1, 1),
(9, 20, -1, 2): (0, 1),
(9, 20, -1, 3): (0, 1),
(9, 20, -1, 4): (0, 0),
(9, 20, -1, 5): (0, -1),
(9, 20, 0, -5): (1, 1),
(9, 20, 0, -4): (1, 1),
(9, 20, 0, -3): (1, 1),
(9, 20, 0, -2): (1, 0),
(9, 20, 0, -1): (0, 1),
(9, 20, 0, 0): (0, 1),
(9, 20, 0, 1): (0, 1),
(9, 20, 0, 2): (-1, 1),
(9, 20, 0, 3): (-1, 1),
(9, 20, 0, 4): (-1, 0),
(9, 20, 0, 5): (-1, -1),
(9, 20, 1, -5): (0, 1),
(9, 20, 1, -4): (0, 1),
(9, 20, 1, -3): (0, 1),
(9, 20, 1, -2): (0, 0),
(9, 20, 1, -1): (0, 1),
(9, 20, 1, 0): (-1, 1),
(9, 20, 1, 1): (-1, 1),
(9, 20, 1, 2): (-1, 1),
(9, 20, 1, 3): (-1, 0),
(9, 20, 1, 4): (-1, -1),
(9, 20, 1, 5): (-1, -1),
(9, 20, 2, -5): (0, 1),
(9, 20, 2, -4): (0, 1),
(9, 20, 2, -3): (0, 1),
(9, 20, 2, -2): (0, 0),
(9, 20, 2, -1): (0, 1),
(9, 20, 2, 0): (0, 1),
(9, 20, 2, 1): (0, 1),
(9, 20, 2, 2): (0, 1),
(9, 20, 2, 3): (0, 1),
(9, 20, 2, 4): (0, 1),
(9, 20, 2, 5): (0, 1),
(9, 20, 3, -5): (0, 1),
(9, 20, 3, -4): (0, 1),
(9, 20, 3, -3): (0, 1),
(9, 20, 3, -2): (0, 0),
(9, 20, 3, -1): (0, 1),
(9, 20, 3, 0): (0, 1),
(9, 20, 3, 1): (0, 1),
(9, 20, 3, 2): (0, 1),
(9, 20, 3, 3): (0, 1),
(9, 20, 3, 4): (0, 1),
(9, 20, 3, 5): (0, 1),
(9, 20, 4, -5): (0, 1),
(9, 20, 4, -4): (0, 1),
(9, 20, 4, -3): (0, 1),
(9, 20, 4, -2): (0, 0),
(9, 20, 4, -1): (0, 1),
(9, 20, 4, 0): (0, 1),
(9, 20, 4, 1): (0, 1),
(9, 20, 4, 2): (0, 1),
(9, 20, 4, 3): (0, 1),
(9, 20, 4, 4): (0, 1),
(9, 20, 4, 5): (0, 1),
(9, 20, 5, -5): (0, 1),
(9, 20, 5, -4): (0, 1),
(9, 20, 5, -3): (0, 1),
(9, 20, 5, -2): (0, 0),
(9, 20, 5, -1): (0, 1),
(9, 20, 5, 0): (0, 1),
(9, 20, 5, 1): (0, 1),
(9, 20, 5, 2): (0, 1),
(9, 20, 5, 3): (0, 1),
(9, 20, 5, 4): (0, 1),
(9, 20, 5, 5): (0, 1),
(9, 21, -5, -5): (0, 1),
(9, 21, -5, -4): (0, 1),
(9, 21, -5, -3): (0, 0),
(9, 21, -5, -2): (0, 1),
(9, 21, -5, -1): (0, 1),
(9, 21, -5, 0): (0, 1),
(9, 21, -5, 1): (0, 1),
(9, 21, -5, 2): (0, 1),
(9, 21, -5, 3): (0, 1),
(9, 21, -5, 4): (0, 1),
(9, 21, -5, 5): (0, 1),
(9, 21, -4, -5): (0, 1),
(9, 21, -4, -4): (0, 1),
(9, 21, -4, -3): (0, 0),
(9, 21, -4, -2): (0, 1),
(9, 21, -4, -1): (0, 1),
(9, 21, -4, 0): (0, 1),
(9, 21, -4, 1): (0, 1),
(9, 21, -4, 2): (0, 1),
(9, 21, -4, 3): (1, 1),
(9, 21, -4, 4): (1, 1),
(9, 21, -4, 5): (1, 0),
(9, 21, -3, -5): (-1, 1),
(9, 21, -3, -4): (-1, 1),
(9, 21, -3, -3): (-1, 0),
(9, 21, -3, -2): (-1, 1),
(9, 21, -3, -1): (-1, 1),
(9, 21, -3, 0): (-1, 1),
(9, 21, -3, 1): (1, 1),
(9, 21, -3, 2): (1, 1),
(9, 21, -3, 3): (0, 1),
(9, 21, -3, 4): (0, 1),
(9, 21, -3, 5): (0, 1),
(9, 21, -2, -5): (0, 1),
(9, 21, -2, -4): (0, 1),
(9, 21, -2, -3): (0, 1),
(9, 21, -2, -2): (0, 1),
(9, 21, -2, -1): (1, 1),
(9, 21, -2, 0): (1, 1),
(9, 21, -2, 1): (1, 1),
(9, 21, -2, 2): (1, 1),
(9, 21, -2, 3): (-1, 1),
(9, 21, -2, 4): (-1, 1),
(9, 21, -2, 5): (-1, 1),
(9, 21, -1, -5): (-1, 1),
(9, 21, -1, -4): (-1, 1),
(9, 21, -1, -3): (-1, 1),
(9, 21, -1, -2): (-1, 1),
(9, 21, -1, -1): (1, 1),
(9, 21, -1, 0): (1, 1),
(9,
path["Games"].values():
if "Access Level" not in x:
x["Access Level"] = 0
if "Min" in path["Games"]["Allin"]:
path["Games"]["Allin"].pop("Min")
if "Max" in path["Games"]["Allin"]:
path["Games"]["Allin"].pop("Max")
for x in trash:
if x in path["System Config"]:
path["System Config"].pop(x)
for x in path["Players"]:
if "CD" in path["Players"][x]:
path["Players"][x]["Cooldowns"] = path["Players"][x].pop("CD")
raw = [(x.split(" ", 1)[0], y) for x, y in
path["Players"][x]["Cooldowns"].items()]
raw.append(("Payday", 0))
new_dict = dict(raw)
path["Players"][x]["Cooldowns"] = new_dict
if "Membership" not in path["Players"][x]:
path["Players"][x]["Membership"] = None
if "Pending" not in path["Players"][x]:
path["Players"][x]["Pending"] = 0
self.save_system()
class PluralDict(dict):
"""This class is used to plural strings
You can plural strings based on the value input when using this class as a dictionary.
"""
def __missing__(self, key):
if '(' in key and key.endswith(')'):
key, rest = key.split('(', 1)
value = super().__getitem__(key)
suffix = rest.rstrip(')').split(',')
if len(suffix) == 1:
suffix.insert(0, '')
return suffix[0] if value <= 1 else suffix[1]
raise KeyError(key)
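# Usage sketch for PluralDict (hypothetical message, not taken from the cog):
#
#     fmt = "You won {chips} chip{chips(s)}!"
#     fmt.format_map(PluralDict(chips=1))   # -> "You won 1 chip!"
#     fmt.format_map(PluralDict(chips=42))  # -> "You won 42 chips!"
#
# A two-part suffix also works, e.g. "penn{n(y,ies)}" yields "y" for n <= 1
# and "ies" otherwise.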
class Casino:
"""Play Casino minigames and earn chips that integrate with Economy!
Any user can join casino by using the casino join command. Casino uses hooks from economy to
cash in/out chips. You are able to create your own casino name and chip name. Casino comes with
7 mini games that you can set min/max bets, multipliers, and access levels. Check out all of the
admin settings by using commands in the setcasino group. For additional information please
check out the wiki on my github.
"""
__slots__ = ['bot', 'file_path', 'version', 'legacy_available', 'legacy_path', 'legacy_system',
'casino_bank', 'cycle_task']
def __init__(self, bot):
self.bot = bot
try: # This allows you to port accounts from older versions of casino
self.legacy_path = "data/casino/casino.json"
self.legacy_system = dataIO.load_json(self.legacy_path)
self.legacy_available = True
except FileNotFoundError:
self.legacy_available = False
self.file_path = "data/JumperCogs/casino/casino.json"
self.casino_bank = CasinoBank(bot, self.file_path)
self.version = "1.7.15"
self.cycle_task = bot.loop.create_task(self.membership_updater())
@commands.group(pass_context=True, no_pm=True)
async def casino(self, ctx):
"""Casino Group Commands"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@casino.command(name="language", pass_context=True, hidden=True)
@checks.is_owner()
async def _language_casino(self, ctx):
"""Changes the output language in casino.
Default is English
"""
author = ctx.message.author
languages = {_("English"): "en", _("Spanish"): "es", _("Brazilian-Portuguese"): "br",
_("Danish"): "da", _("Malaysian"): "zsm", _("German"): "de",
_("Mandarin"): "cmn"}
l_out = ", ".join(list(languages.keys())[:-2] + [" or ".join(list(languages.keys())[-2:])])
await self.bot.say(_("I can change the language to \n{}\nWhich language would you prefer? "
"\n*Note this change will affect every server.*").format(l_out))
response = await self.bot.wait_for_message(timeout=15, author=author)
if response is None:
return await self.bot.say(_("No response. Cancelling language change."))
if response.content.title() in languages:
language = languages[response.content.title()]
lang = gettext.translation('casino', localedir='data/JumperCogs/casino/data',
languages=[language])
lang.install()
fp = "data/JumperCogs/casino/data/languages.json"
l_data = dataIO.load_json(fp)
l_data["Language"] = language
dataIO.save_json(fp, l_data)
await self.bot.say(_("The language is now set to {}").format(response.content.title()))
else:
return await self.bot.say(_("That language is not supported."))
@casino.command(name="purge", pass_context=True)
@checks.is_owner()
async def _purge_casino(self, ctx):
"""Removes all servers that the bot is no longer on.
If your JSON file is getting rather large, utilize this
command. It is possible that if your bot is on a ton of
servers, there are many that it is no longer running on.
This will remove them from the JSON file.
"""
author = ctx.message.author
servers = self.casino_bank.get_all_servers()
purge_list = [x for x in servers if self.bot.get_server(x) is None]
if not purge_list:
return await self.bot.say("There are no servers for me to purge at this time.")
await self.bot.say(_("I found {} server(s) I am no longer on. Would you like for me to "
"delete their casino data?").format(len(purge_list)))
response = await self.bot.wait_for_message(timeout=15, author=author)
if response is None:
return await self.bot.say(_("You took too long to answer. Canceling purge."))
if response.content.title() == _("Yes"):
for x in purge_list:
servers.pop(x)
self.casino_bank.save_system()
await self.bot.say(_("{} server entries have been erased.").format(len(purge_list)))
else:
return await self.bot.say(_("Incorrect response. This is a yes or no question."))
@casino.command(name="forceupdate", pass_context=True)
@checks.is_owner()
async def _forceupdate_casino(self, ctx):
"""Force applies older patches
This command will attempt to update your JSON with the
new dictionary keys. If you are having issues with your JSON
having a lot of key errors, namely Cooldown, then try using
this command. THIS DOES NOT UPDATE CASINO
"""
server = ctx.message.server
settings = self.casino_bank.check_server_settings(server)
self.casino_bank.patch_1581(settings)
self.casino_bank.patch_16(settings)
self.casino_bank.patch_games(settings)
self.casino_bank.patch_1694(settings)
await self.bot.say(_("Force applied three previous JSON updates. Please reload casino."))
@casino.command(name="memberships", pass_context=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def _memberships_casino(self, ctx):
"""Shows all memberships on the server."""
server = ctx.message.server
settings = self.casino_bank.check_server_settings(server)
memberships = settings["Memberships"].keys()
if memberships:
await self.bot.say(_("Available Memberships:```\n{}```").format('\n'.join(memberships)))
else:
await self.bot.say(_("There are no memberships."))
@casino.command(name="join", pass_context=True)
async def _join_casino(self, ctx):
"""Grants you membership access to the casino"""
user = ctx.message.author
settings = self.casino_bank.check_server_settings(user.server)
try:
self.casino_bank.create_account(user)
except UserAlreadyRegistered:
return await self.bot.say(_("{} already has a casino membership").format(user.name))
else:
name = settings["System Config"]["Casino Name"]
await self.bot.say(_("Your membership has been approved! Welcome to {} Casino!\nAs a "
"first time member we have credited your account with 100 free "
"chips.\nHave fun!").format(name))
@casino.command(name="transfer", pass_context=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def _transfer_casino(self, ctx, user: discord.Member, chips: int):
"""Transfers chips to another player"""
author = ctx.message.author
settings = self.casino_bank.check_server_settings(author.server)
chip_name = settings["System Config"]["Chip Name"]
limit = settings["System Config"]["Transfer Limit"]
if not self.casino_bank.membership_exists(author):
return await self.bot.say("{} is not registered to the casino.".format(author.name))
if not self.casino_bank.membership_exists(user):
return await self.bot.say("{} is not registered to the casino.".format(user.name))
if chips > limit:
return await self.bot.say(_("Your transfer cannot exceed the server limit of {} {} "
"chips.").format(limit, chip_name))
chip_name = settings["System Config"]["Chip Name"]
cooldown = self.check_cooldowns(user, "Transfer", settings, triggered=True)
if not cooldown:
try:
self.casino_bank.transfer_chips(author, user, chips)
except NegativeChips:
return await self.bot.say(_("An amount cannot be negative."))
except SameSenderAndReceiver:
return await self.bot.say(_("Sender and Reciever cannot be the same."))
except BotNotAUser:
return await self.bot.say(_("You can send chips to a bot."))
except InsufficientChips:
return await self.bot.say(_("Not enough chips to transfer."))
else:
logger.info("{}({}) transferred {} {} to {}({}).".format(author.name, author.id,
chip_name, chips,
user.name, user.id))
await self.bot.say(_("{} transferred {} {} to {}.").format(author.name, chip_name,
chips, user.name))
else:
await self.bot.say(cooldown)
@casino.command(name="acctransfer", pass_context=True)
async def _acctransfer_casino(self, ctx):
"""Transfers account info from old casino. Limit 1 transfer per user"""
user = ctx.message.author
settings = self.casino_bank.check_server_settings(user.server)
if not self.casino_bank.membership_exists(user):
msg = _("I can't transfer data if you already have an account with the new casino.")
elif not self.legacy_available:
msg = _("No legacy file was found. Unable to perform membership transfers.")
elif user.id in self.legacy_system["Players"]:
await self.bot.say(_("Account for {} found. Your casino data will be transferred to "
"the {} server. After your data is transferred your old data will "
"be deleted. I can only transfer data **one time**.\nDo you wish "
"to transfer?").format(user.name, user.server.name))
response = await self.bot.wait_for_message(timeout=15, author=user)
if response is None:
msg = _("No response, transfer cancelled.")
elif response.content.title() == _("No"):
msg = _("Transfer cancelled.")
elif response.content.title() == _("Yes"):
old_data = self.legacy_system["Players"][user.id]
transfer = {user.id: old_data}
settings["Players"].update(transfer)
self.legacy_system["Players"].pop(user.id)
dataIO.save_json(self.legacy_path, self.legacy_system)
self.casino_bank.patch_1581(settings)
self.casino_bank.patch_16(settings)
self.casino_bank.patch_games(settings)
self.casino_bank.patch_1694(settings)
self.casino_bank.save_system()
msg = _("Data transfer successful. You can now access your old casino data.")
else:
msg = _("Improper response. Please state yes or no. Cancelling transfer.")
else:
msg = _("Unable to locate your previous data.")
await self.bot.say(msg)
@casino.command(name="leaderboard", pass_context=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def _leaderboard_casino(self, ctx, sort="top"):
"""Displays Casino Leaderboard"""
user = ctx.message.author
self.casino_bank.check_server_settings(user.server)
members = self.casino_bank.get_server_memberships(user.server)
if sort not in ["top", "bottom", "place"]:
sort = "top"
if members:
players = [(x["Name"], x["Chips"]) for x in members.values()]
pos = [x + 1 for x, y in enumerate(players)]
if sort == "bottom":
style = sorted(players, key=itemgetter(1))
rev_pos = list(reversed(pos))
players, chips = zip(*style)
data = list(zip(rev_pos, players, chips))
elif sort == "place":
style = sorted([[x["Name"], x["Chips"]] if x["Name"] != user.name
else ["[" + x["Name"] + "]", x["Chips"]]
for x in members.values()], key=itemgetter(1), reverse=True)
players, chips = zip(*style)
data = list(zip(pos, players, chips))
else:
style = sorted(players, key=itemgetter(1), reverse=True)
players, chips = zip(*style)
data = list(zip(pos, players, chips))
headers = [_("Rank"), _("Names"), _("Chips")]
msg = await self.table_split(user, headers, data, sort)
else:
msg = _("There are no casino players to show on the leaderboard.")
await self.bot.say(msg)
@casino.command(name="exchange", pass_context=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def _exchange_casino(self, ctx, currency: str, amount: int):
"""Exchange chips for credits and credits for chips"""
# Declare all variables here
user = ctx.message.author
settings = self.casino_bank.check_server_settings(user.server)
bank = self.bot.get_cog('Economy').bank
currency = currency.title()
chip_rate = settings["System Config"]["Chip Rate"]
credit_rate =
"""
This module calculates corrections for the species listed below, fitted to the experimental and computed
entries given to the CorrectionCalculator constructor.
"""
import warnings
from collections import OrderedDict
from typing import Dict, List, Tuple, Union, Sequence
try:
import ruamel.yaml as yaml
except ImportError:
try:
import ruamel_yaml as yaml # type: ignore # noqa
except ImportError:
import yaml # type: ignore # noqa
import numpy as np
import plotly.graph_objects as go
from monty.serialization import loadfn
from scipy.optimize import curve_fit
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.analysis.structure_analyzer import sulfide_type
def _func(x, *m):
"""
Helper function for curve_fit.
"""
return np.dot(x, m)
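# Sketch of how _func is used: compute_corrections() below builds a
# coefficient matrix whose rows hold per-atom species counts for each
# compound, and curve_fit() fits corrections m so that
#
#     diffs ≈ np.dot(coeff_mat, m)
#
# where diffs are the per-atom differences between experimental and computed
# formation energies. Because the model is linear in m, this amounts to an
# (optionally uncertainty-weighted) linear least-squares fit.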
class CorrectionCalculator:
"""
A CorrectionCalculator contains experimental and computed entries which it uses to compute corrections.
It graphs residual errors after applying the computed corrections and creates the MPCompatibility.yaml
file the Correction classes use.
Attributes:
species: list of species that corrections are being calculated for
exp_compounds: list of dictionaries which each contain a compound's formula and experimental data
calc_compounds: dictionary of ComputedEntry objects
corrections: list of corrections in same order as species list
corrections_std_error: list of the variances of the corrections in same order as species list
corrections_dict: dictionary of format {'species': (value, uncertainty)} for easier correction lookup
"""
def __init__(
self,
species: Sequence[str] = (
"oxide",
"peroxide",
"superoxide",
"S",
"F",
"Cl",
"Br",
"I",
"N",
"Se",
"Si",
"Sb",
"Te",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"W",
"Mo",
"H",
),
max_error: float = 0.1,
allow_unstable: Union[float, bool] = 0.1,
exclude_polyanions: Sequence[str] = (
"SO4",
"CO3",
"NO3",
"OCl3",
"SiO4",
"SeO3",
"TiO3",
"TiO4",
),
) -> None:
"""
Initializes a CorrectionCalculator.
Args:
species: list of species to calculate corrections for
max_error: maximum tolerable relative uncertainty in experimental energy.
Compounds with relative uncertainty greater than this value will be excluded from the fit
allow_unstable: whether unstable entries are to be included in the fit. If True, all compounds will
be included regardless of their energy above hull. If False or a float, compounds with
energy above hull greater than the given value (defaults to 0.1 eV/atom) will be
excluded
exclude_polyanions: a list of polyanions that contain additional sources of error that may negatively
influence the quality of the fitted corrections. Compounds with these polyanions
will be excluded from the fit
"""
self.species = species
self.max_error = max_error
if not allow_unstable:
self.allow_unstable = 0.1
else:
self.allow_unstable = allow_unstable
self.exclude_polyanions = exclude_polyanions
self.corrections: List[float] = []
self.corrections_std_error: List[float] = []
self.corrections_dict: Dict[str, Tuple[float, float]] = {} # {'species': (value, uncertainty)}
# to help the graph_residual_error_per_species() method differentiate between oxygen containing compounds
if "oxide" in self.species:
self.oxides: List[str] = []
if "peroxide" in self.species:
self.peroxides: List[str] = []
if "superoxide" in self.species:
self.superoxides: List[str] = []
if "S" in self.species:
self.sulfides: List[str] = []
def compute_from_files(self, exp_gz: str, comp_gz: str):
"""
Args:
exp_gz: name of .json.gz file that contains experimental data
data in .json.gz file should be a list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
comp_gz: name of .json.gz file that contains computed entries
data in .json.gz file should be a dictionary of {chemical formula: ComputedEntry}
"""
exp_entries = loadfn(exp_gz)
calc_entries = loadfn(comp_gz)
return self.compute_corrections(exp_entries, calc_entries)
def compute_corrections(self, exp_entries: list, calc_entries: dict) -> dict:
"""
Computes the corrections and fills in correction, corrections_std_error, and corrections_dict.
Args:
exp_entries: list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
calc_entries: dictionary of computed entries, of the form {chemical formula: ComputedEntry}
Raises:
ValueError: calc_compounds is missing an entry
"""
self.exp_compounds = exp_entries
self.calc_compounds = calc_entries
self.names: List[str] = []
self.diffs: List[float] = []
self.coeff_mat: List[List[float]] = []
self.exp_uncer: List[float] = []
# remove any corrections in calc_compounds
for entry in self.calc_compounds.values():
entry.correction = 0
for cmpd_info in self.exp_compounds:
# to get consistent element ordering in formula
name = Composition(cmpd_info["formula"]).reduced_formula
allow = True
compound = self.calc_compounds.get(name, None)
if not compound:
warnings.warn(
"Compound {} is not found in provided computed entries and is excluded from the fit".format(name)
)
continue
# filter out compounds with large uncertainties
relative_uncertainty = abs(cmpd_info["uncertainty"] / cmpd_info["exp energy"])
if relative_uncertainty > self.max_error:
allow = False
warnings.warn(
"Compound {} is excluded from the fit due to high experimental uncertainty ({}%)".format(
name, relative_uncertainty
)
)
# filter out compounds containing certain polyanions
for anion in self.exclude_polyanions:
if anion in name or anion in cmpd_info["formula"]:
allow = False
warnings.warn(
"Compound {} contains the polyanion {} and is excluded from the fit".format(name, anion)
)
break
# filter out compounds that are unstable
if isinstance(self.allow_unstable, float):
try:
eah = compound.data["e_above_hull"]
except KeyError:
raise ValueError("Missing e above hull data")
if eah > self.allow_unstable:
allow = False
warnings.warn(
"Compound {} is unstable and excluded from the fit (e_above_hull = {})".format(name, eah)
)
if allow:
comp = Composition(name)
elems = list(comp.as_dict())
reactants = []
for elem in elems:
try:
elem_name = Composition(elem).reduced_formula
reactants.append(self.calc_compounds[elem_name])
except KeyError:
raise ValueError("Computed entries missing " + elem)
rxn = ComputedReaction(reactants, [compound])
rxn.normalize_to(comp)
energy = rxn.calculated_reaction_energy
coeff = []
for specie in self.species:
if specie == "oxide":
if compound.data["oxide_type"] == "oxide":
coeff.append(comp["O"])
self.oxides.append(name)
else:
coeff.append(0)
elif specie == "peroxide":
if compound.data["oxide_type"] == "peroxide":
coeff.append(comp["O"])
self.peroxides.append(name)
else:
coeff.append(0)
elif specie == "superoxide":
if compound.data["oxide_type"] == "superoxide":
coeff.append(comp["O"])
self.superoxides.append(name)
else:
coeff.append(0)
elif specie == "S":
if Element("S") in comp:
sf_type = "sulfide"
if compound.data.get("sulfide_type"):
sf_type = compound.data["sulfide_type"]
elif hasattr(compound, "structure"):
sf_type = sulfide_type(compound.structure)
if sf_type == "sulfide":
coeff.append(comp["S"])
self.sulfides.append(name)
else:
coeff.append(0)
else:
coeff.append(0)
else:
try:
coeff.append(comp[specie])
except ValueError:
raise ValueError("We can't detect this specie: {}".format(specie))
self.names.append(name)
self.diffs.append((cmpd_info["exp energy"] - energy) / comp.num_atoms)
self.coeff_mat.append([i / comp.num_atoms for i in coeff])
self.exp_uncer.append((cmpd_info["uncertainty"]) / comp.num_atoms)
# for any exp entries with no uncertainty value, assign average uncertainty value
sigma = np.array(self.exp_uncer)
sigma[sigma == 0] = np.nan
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=RuntimeWarning
) # numpy raises warning if the entire array is nan values
mean_uncer = np.nanmean(sigma)
sigma = np.where(np.isnan(sigma), mean_uncer, sigma)
if np.isnan(mean_uncer):
# no uncertainty values for any compounds, don't try to weight
popt, self.pcov = curve_fit(_func, self.coeff_mat, self.diffs, p0=np.ones(len(self.species)))
else:
popt, self.pcov = curve_fit(
_func,
self.coeff_mat,
self.diffs,
p0=np.ones(len(self.species)),
sigma=sigma,
absolute_sigma=True,
)
self.corrections = popt.tolist()
self.corrections_std_error = np.sqrt(np.diag(self.pcov)).tolist()
for i in range(len(self.species)):
self.corrections_dict[self.species[i]] = (
round(self.corrections[i], 3),
round(self.corrections_std_error[i], 4),
)
return self.corrections_dict
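# Usage sketch (hypothetical file names): corrections are typically fitted
# from serialized experimental and computed data sets, e.g.
#
#     calc = CorrectionCalculator()
#     corrections = calc.compute_from_files("exp_compounds.json.gz",
#                                           "calc_compounds.json.gz")
#     fig = calc.graph_residual_error()
#
# compute_from_files() returns the same {species: (value, std_error)}
# dictionary as compute_corrections().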
def graph_residual_error(self) -> go.Figure:
"""
Graphs the residual errors for all compounds after applying computed corrections.
"""
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_graph = self.names.copy()
abs_errors, labels_graph = (list(t) for t in zip(*sorted(zip(abs_errors, labels_graph)))) # sort by error
num = len(abs_errors)
fig = go.Figure(
data=go.Scatter(
x=np.linspace(1, num, num),
y=abs_errors,
mode="markers",
text=labels_graph,
),
layout=go.Layout(
title=go.layout.Title(text="Residual Errors"),
yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
),
)
print("Residual Error:")
print("Median = " + str(np.median(np.array(abs_errors))))
print("Mean = " + str(np.mean(np.array(abs_errors))))
print("Std Dev = " + str(np.std(np.array(abs_errors))))
print("Original Error:")
print("Median = " + str(abs(np.median(np.array(self.diffs)))))
print("Mean = " + str(abs(np.mean(np.array(self.diffs)))))
print("Std Dev = " + str(np.std(np.array(self.diffs))))
return fig
def graph_residual_error_per_species(self, specie: str) -> go.Figure:
"""
Graphs the residual errors for each compound that contains specie after applying computed corrections.
Args:
specie: the specie/group that residual errors are being plotted for
Raises:
ValueError: the specie is not a valid specie that this class fits corrections for
"""
if specie not in self.species:
raise ValueError("not a valid specie")
if len(self.corrections) == 0:
raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
labels_species = self.names.copy()
diffs_cpy = self.diffs.copy()
num = len(labels_species)
if specie in ("oxide", "peroxide", "superoxide", "S"):
if specie == "oxide":
compounds = self.oxides
elif specie == "peroxide":
compounds = self.peroxides
elif specie == "superoxides":
compounds = self.superoxides
else:
compounds = self.sulfides
for i in range(num):
if labels_species[num - i - 1] not in compounds:
del labels_species[num - i - 1]
del abs_errors[num - i - 1]
del
# File: projectFile.py
from flask_website import app
app.run(debug=True)
#!/usr/bin/env python
from flask_website import app
from flask_website.search import update_documentation_index
with app.test_request_context():
update_documentation_index()
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
SECRET_KEY = 'testkey'
DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'flask-website.db')
DATABASE_CONNECT_OPTIONS = {}
ADMINS = frozenset(['http://lucumr.pocoo.org/'])
WHOOSH_INDEX = os.path.join(_basedir, 'flask-website.whoosh')
DOCUMENTATION_PATH = os.path.join(_basedir, '../flask/docs/_build/dirhtml')
del os
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
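# Hedged usage sketch (not part of the original style file): a Pygments Style
# subclass like FlaskyStyle above is normally referenced by dotted path from a
# Sphinx conf.py (the module name in that case is whatever file defines it), or
# passed directly to a formatter as shown here.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    # Render a tiny snippet with the style defined above.
    print(highlight('print("hello")', PythonLexer(), HtmlFormatter(style=FlaskyStyle)))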
# -*- coding: utf-8 -*-
from urlparse import urlparse
from werkzeug import url_quote
from flask import Markup
class Extension(object):
def __init__(self, name, author, description,
github=None, gitlab=None, bitbucket=None, docs=None, website=None,
approved=False, notes=None):
self.name = name
self.author = author
self.description = Markup(description)
self.github = github
self.gitlab = gitlab
self.bitbucket = bitbucket
self.docs = docs
self.website = website
self.approved = approved
self.notes = notes
def to_json(self):
rv = vars(self).copy()
rv['description'] = unicode(rv['description'])
return rv
@property
def pypi(self):
return 'http://pypi.python.org/pypi/%s' % url_quote(self.name)
@property
def docserver(self):
if self.docs:
return urlparse(self.docs)[1]
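# Hedged usage sketch (not from the original website code; the extension data
# below is invented): constructing an Extension and reading its derived
# properties. Requires Flask/Werkzeug because of the imports above.
if __name__ == '__main__':
    _ext = Extension('Flask-Example', 'Jane Doe',
                     description='<p>An invented example extension.',
                     github='janedoe/flask-example',
                     docs='http://pythonhosted.org/Flask-Example/')
    print(_ext.pypi)       # -> http://pypi.python.org/pypi/Flask-Example
    print(_ext.docserver)  # -> pythonhosted.org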
# This list contains all extensions that were approved as well as those which
# passed listing.
extensions = [
Extension('Flask-OpenID', '<NAME>',
description='''
<p>Adds <a href="http://openid.net/">OpenID</a> support to Flask.
''',
github='mitsuhiko/flask-openid',
docs='http://pythonhosted.org/Flask-OpenID/',
notes='''
The long description is too short; tests are missing.
'''
),
Extension('Flask-Babel', '<NAME>',
description='''
<p>Adds i18n/l10n support to Flask, based on
<a href=http://babel.edgewall.org/>babel</a> and
<a href=http://pytz.sourceforge.net/>pytz</a>.
''',
github='mitsuhiko/flask-babel',
docs='http://pythonhosted.org/Flask-Babel/',
approved=True,
notes='''
How to improve: add a better long description to the next release.
'''
),
Extension('Flask-SQLAlchemy', '<NAME>',
description='''
<p>Add <a href="http://www.sqlalchemy.org/">SQLAlchemy</a> support to Flask
with automatic configuration and helpers to simplify common web use cases.
Major features include:</p>
<ul>
<li>Handle configuring one or more database connections.</li>
<li>Set up sessions scoped to the request/response cycle.</li>
<li>Time queries and track model changes for debugging.</li>
</ul>
''',
github='mitsuhiko/flask-sqlalchemy',
docs='http://flask-sqlalchemy.pocoo.org/',
approved=True
),
Extension('Flask-Migrate', '<NAME>',
description='''
<p><a href="http://www.sqlalchemy.org/">SQLAlchemy</a> database
migrations for Flask applications using
<a href="https://alembic.readthedocs.org/">Alembic</a>. The
database operations are provided as command line arguments for
<a href="https://flask-script.readthedocs.org/">Flask-Script</a>.
''',
github='miguelgrinberg/flask-migrate',
docs='http://pythonhosted.org/Flask-Migrate/',
),
Extension('Flask-XML-RPC', '<NAME>',
description='''
<p>Adds <a href="http://www.xmlrpc.com/">XML-RPC</a> support to Flask.
''',
bitbucket='leafstorm/flask-xml-rpc',
docs='http://pythonhosted.org/Flask-XML-RPC/',
approved=True
),
Extension('Flask-CouchDB', '<NAME>',
description='''
<p>Adds <a href="http://couchdb.apache.org/">CouchDB</a> support to Flask.
''',
bitbucket='leafstorm/flask-couchdb',
docs='http://pythonhosted.org/Flask-CouchDB/',
approved=True,
notes='''
There is also Flask-CouchDBKit. Both are fine because they are
doing different things, but the latter is not yet approved.
'''
),
Extension('Flask-Uploads', '<NAME>',
description='''
<p>Flask-Uploads allows your application to flexibly and
efficiently handle file uploading and serving the uploaded files.
You can create different sets of uploads - one for document
attachments, one for photos, etc.
''',
github='maxcountryman/flask-uploads',
docs='https://flask-uploads.readthedocs.org/en/latest/',
approved=True
),
Extension('Flask-Themes', '<NAME>',
description='''
<p>Flask-Themes makes it easy for your application to support
a wide range of appearances.
''',
bitbucket='leafstorm/flask-themes',
docs='http://pythonhosted.org/Flask-Themes/',
approved=True
),
Extension('Flask-CouchDBKit', '<NAME>',
description='''
<p>Adds <a href="http://www.couchdbkit.org/">CouchDBKit</a> support to Flask.
''',
github='sirn/flask-couchdbkit',
docs='http://pythonhosted.org/Flask-CouchDBKit/'
),
Extension('Flask-Genshi', '<NAME>',
description='''
<p>Adds support for the <a href="http://genshi.edgewall.org/">Genshi</a>
templating language to Flask applications.
''',
github='dag/flask-genshi',
docs='http://pythonhosted.org/Flask-Genshi/',
approved=True,
notes='''
This is the first template engine extension. When others come
around it would be a good idea to decide on a common interface.
'''
),
Extension('Flask-Mail', '<NAME> (created by <NAME>)',
description='''
<p>Makes sending mails from Flask applications very easy and
also has support for unit testing.
''',
github='mattupstate/flask-mail',
docs='http://pythonhosted.org/Flask-Mail/',
approved=True
),
Extension('Flask-WTF', '<NAME> (created by <NAME>)',
description='''
<p>Flask-WTF offers simple integration with WTForms. This
integration includes optional CSRF handling for greater security.
''',
github='ajford/flask-wtf',
docs='http://pythonhosted.org/Flask-WTF/',
approved=True
),
Extension('Flask-Testing', u'<NAME> (created by <NAME>)',
description='''
<p>The Flask-Testing extension provides unit testing utilities for Flask.
''',
github='jarus/flask-testing',
docs='http://pythonhosted.org/Flask-Testing/',
approved=True
),
Extension('Flask-Script', '<NAME> (created by <NAME>)',
description='''
<p>The Flask-Script extension provides support for writing external
scripts in Flask. It uses argparse to parse command line arguments.
''',
github='techniq/flask-script',
docs='http://pythonhosted.org/Flask-Script/',
approved=True,
notes='''
Flask-Actions has some overlap. Consider that when approving
Flask-Actions or similar packages.
'''
),
Extension('flask-lesscss', '<NAME>',
description='''
<p>
A small Flask extension that makes it easy to use
<a href=http://lesscss.org/>LessCSS</a> with your
Flask application.
''',
docs='http://sjl.bitbucket.org/flask-lesscss/',
bitbucket='sjl/flask-lesscss',
notes='''
Broken package description, nonconforming package name, and it does not
follow the standard API rules (init_lesscss instead of lesscss).
Considered for unlisting; an improved version should be released as
"Flask-LessCSS" with a conforming API, a fixed package index entry,
and a test suite.
'''
),
Extension('Flask-Creole', '<NAME>',
description='''
<p>Creole parser filters for Flask.
''',
docs='http://pythonhosted.org/Flask-Creole',
bitbucket='aafshar/flask-creole-main',
approved=True,
notes='''
Flask-Markdown and this extension should share an API; consider that
when approving Flask-Markdown.
'''
),
Extension('Flask-Cache', '<NAME>',
description='''
<p>Adds cache support to your Flask application.
''',
docs='http://pythonhosted.org/Flask-Cache',
github='thadeusb/flask-cache',
),
Extension('Flask-Principal', '<NAME>',
description='''
<p>Identity management for Flask.
''',
docs='http://pythonhosted.org/Flask-Principal',
github='mattupstate/flask-principal',
approved=False
),
Extension('Flask-Zen', '<NAME>',
description='''
<p>Flask-Zen allows you to use PyZen via Flask-Script commands.
''',
docs='http://pythonhosted.org/Flask-Zen/',
github='coderanger/flask-zen',
approved=False
),
Extension('Flask-Static-Compress', '<NAME>',
description='''
<p>Automatically minifies, combines, and versions your static CSS
and JavaScript assets. Like Django-Compressor for Flask.
''',
github='alanhamlett/flask-static-compress',
docs='https://github.com/alanhamlett/flask-static-compress',
approved=False
),
Extension('Flask-Assets', u'<NAME>',
description='''
<p>
Integrates the webassets library with Flask, adding support for
merging, minifying and compiling CSS and Javascript files.
''',
docs='http://elsdoerfer.name/docs/flask-assets/',
github='miracle2k/flask-assets',
approved=False
),
Extension('Flask-AutoIndex', '<NAME>',
description='''
<p>
An extension that generates an index page for your Flask
application automatically
''',
docs='http://pythonhosted.org/Flask-AutoIndex/',
github='sublee/flask-autoindex',
approved=False
),
Extension('Flask-Celery', '<NAME>',
description='''
<p>
Celery integration for Flask
''',
docs='http://ask.github.com/celery/',
github='ask/flask-celery',
approved=False
),
Extension('Flask-Cors', '<NAME>',
description='''
<p>
Cross Origin Resource Sharing (CORS) for flask
''',
docs='http://flask-cors.readthedocs.org/en/latest/',
github='wcdolphin/flask-cors',
approved=False
),
Extension('Frozen-Flask', '<NAME>',
description='''
<p>
Freezes a Flask application into a set of static files.
The result can be hosted without any server-side software
other than a traditional web server.
''',
docs='http://pythonhosted.org/Frozen-Flask/',
github='SimonSapin/Frozen-Flask',
approved=True
),
Extension('Flask-FlatPages', '<NAME>',
description='''
<p>
Provides flat static pages to a Flask application, based on
if self.local_vars_configuration.client_side_validation and current_state not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `current_state` ({0}), must be one of {1}".format(current_state, allowed_values)) # noqa: E501
self._current_state = current_state
@property
def link_rel_canonical_url(self):
"""Gets the link_rel_canonical_url of this BlogPost. # noqa: E501
Optional override to set the URL to be used in the rel=canonical link tag on the page. # noqa: E501
:return: The link_rel_canonical_url of this BlogPost. # noqa: E501
:rtype: str
"""
return self._link_rel_canonical_url
@link_rel_canonical_url.setter
def link_rel_canonical_url(self, link_rel_canonical_url):
"""Sets the link_rel_canonical_url of this BlogPost.
Optional override to set the URL to be used in the rel=canonical link tag on the page. # noqa: E501
:param link_rel_canonical_url: The link_rel_canonical_url of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and link_rel_canonical_url is None: # noqa: E501
raise ValueError("Invalid value for `link_rel_canonical_url`, must not be `None`") # noqa: E501
self._link_rel_canonical_url = link_rel_canonical_url
@property
def featured_image(self):
"""Gets the featured_image of this BlogPost. # noqa: E501
The featuredImage of this Blog Post. # noqa: E501
:return: The featured_image of this BlogPost. # noqa: E501
:rtype: str
"""
return self._featured_image
@featured_image.setter
def featured_image(self, featured_image):
"""Sets the featured_image of this BlogPost.
The featuredImage of this Blog Post. # noqa: E501
:param featured_image: The featured_image of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and featured_image is None: # noqa: E501
raise ValueError("Invalid value for `featured_image`, must not be `None`") # noqa: E501
self._featured_image = featured_image
@property
def featured_image_alt_text(self):
"""Gets the featured_image_alt_text of this BlogPost. # noqa: E501
Alt Text of the featuredImage. # noqa: E501
:return: The featured_image_alt_text of this BlogPost. # noqa: E501
:rtype: str
"""
return self._featured_image_alt_text
@featured_image_alt_text.setter
def featured_image_alt_text(self, featured_image_alt_text):
"""Sets the featured_image_alt_text of this BlogPost.
Alt Text of the featuredImage. # noqa: E501
:param featured_image_alt_text: The featured_image_alt_text of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and featured_image_alt_text is None: # noqa: E501
raise ValueError("Invalid value for `featured_image_alt_text`, must not be `None`") # noqa: E501
self._featured_image_alt_text = featured_image_alt_text
@property
def public_access_rules_enabled(self):
"""Gets the public_access_rules_enabled of this BlogPost. # noqa: E501
Boolean to determine whether or not to respect publicAccessRules. # noqa: E501
:return: The public_access_rules_enabled of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._public_access_rules_enabled
@public_access_rules_enabled.setter
def public_access_rules_enabled(self, public_access_rules_enabled):
"""Sets the public_access_rules_enabled of this BlogPost.
Boolean to determine whether or not to respect publicAccessRules. # noqa: E501
:param public_access_rules_enabled: The public_access_rules_enabled of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and public_access_rules_enabled is None: # noqa: E501
raise ValueError("Invalid value for `public_access_rules_enabled`, must not be `None`") # noqa: E501
self._public_access_rules_enabled = public_access_rules_enabled
@property
def public_access_rules(self):
"""Gets the public_access_rules of this BlogPost. # noqa: E501
Rules for requiring member registration to access private content. # noqa: E501
:return: The public_access_rules of this BlogPost. # noqa: E501
:rtype: list[object]
"""
return self._public_access_rules
@public_access_rules.setter
def public_access_rules(self, public_access_rules):
"""Sets the public_access_rules of this BlogPost.
Rules for requiring member registration to access private content. # noqa: E501
:param public_access_rules: The public_access_rules of this BlogPost. # noqa: E501
:type: list[object]
"""
if self.local_vars_configuration.client_side_validation and public_access_rules is None: # noqa: E501
raise ValueError("Invalid value for `public_access_rules`, must not be `None`") # noqa: E501
self._public_access_rules = public_access_rules
@property
def layout_sections(self):
"""Gets the layout_sections of this BlogPost. # noqa: E501
:return: The layout_sections of this BlogPost. # noqa: E501
:rtype: dict(str, LayoutSection)
"""
return self._layout_sections
@layout_sections.setter
def layout_sections(self, layout_sections):
"""Sets the layout_sections of this BlogPost.
:param layout_sections: The layout_sections of this BlogPost. # noqa: E501
:type: dict(str, LayoutSection)
"""
if self.local_vars_configuration.client_side_validation and layout_sections is None: # noqa: E501
raise ValueError("Invalid value for `layout_sections`, must not be `None`") # noqa: E501
self._layout_sections = layout_sections
@property
def theme_settings_values(self):
"""Gets the theme_settings_values of this BlogPost. # noqa: E501
:return: The theme_settings_values of this BlogPost. # noqa: E501
:rtype: dict(str, object)
"""
return self._theme_settings_values
@theme_settings_values.setter
def theme_settings_values(self, theme_settings_values):
"""Sets the theme_settings_values of this BlogPost.
:param theme_settings_values: The theme_settings_values of this BlogPost. # noqa: E501
:type: dict(str, object)
"""
if self.local_vars_configuration.client_side_validation and theme_settings_values is None: # noqa: E501
raise ValueError("Invalid value for `theme_settings_values`, must not be `None`") # noqa: E501
self._theme_settings_values = theme_settings_values
@property
def url(self):
"""Gets the url of this BlogPost. # noqa: E501
A generated field representing the URL of this blog post. # noqa: E501
:return: The url of this BlogPost. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this BlogPost.
A generated field representing the URL of this blog post. # noqa: E501
:param url: The url of this BlogPost. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and url is None: # noqa: E501
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
@property
def publish_date(self):
"""Gets the publish_date of this BlogPost. # noqa: E501
The date (ISO8601 format) the blog post is to be published at. # noqa: E501
:return: The publish_date of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._publish_date
@publish_date.setter
def publish_date(self, publish_date):
"""Sets the publish_date of this BlogPost.
The date (ISO8601 format) the blog post is to be published at. # noqa: E501
:param publish_date: The publish_date of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and publish_date is None: # noqa: E501
raise ValueError("Invalid value for `publish_date`, must not be `None`") # noqa: E501
self._publish_date = publish_date
@property
def deleted_at(self):
"""Gets the deleted_at of this BlogPost. # noqa: E501
The timestamp (ISO8601 format) when this Blog Post was deleted. # noqa: E501
:return: The deleted_at of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._deleted_at
@deleted_at.setter
def deleted_at(self, deleted_at):
"""Sets the deleted_at of this BlogPost.
The timestamp (ISO8601 format) when this Blog Post was deleted. # noqa: E501
:param deleted_at: The deleted_at of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and deleted_at is None: # noqa: E501
raise ValueError("Invalid value for `deleted_at`, must not be `None`") # noqa: E501
self._deleted_at = deleted_at
@property
def created_at(self):
"""Gets the created_at of this BlogPost. # noqa: E501
The timestamp (ISO8601 format) when this blog post was created. # noqa: E501
:return: The created_at of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this BlogPost.
The timestamp (ISO8601 format) when this blog post was created. # noqa: E501
:param created_at: The created_at of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created_at is None: # noqa: E501
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def published(self):
"""Gets the published of this BlogPost. # noqa: E501
Boolean describing if this Blog Post is published. # noqa: E501
:return: The published of this BlogPost. # noqa: E501
:rtype: bool
"""
return self._published
@published.setter
def published(self, published):
"""Sets the published of this BlogPost.
Boolean describing if this Blog Post is published. # noqa: E501
:param published: The published of this BlogPost. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and published is None: # noqa: E501
raise ValueError("Invalid value for `published`, must not be `None`") # noqa: E501
self._published = published
@property
def updated_at(self):
"""Gets the updated_at of this BlogPost. # noqa: E501
The timestamp (ISO8601 format) when this Blog Post was last updated. # noqa: E501
:return: The updated_at of this BlogPost. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this BlogPost.
The timestamp (ISO8601 format) when this Blog Post was last updated. # noqa: E501
:param updated_at: The updated_at of this BlogPost. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if not nodeID:
logger.warning("Failed to generate stable node ID, returning empty string. If you see this message with a "
"work dir on a shared file system when using workers running on multiple nodes, you might "
"experience cryptic job failures")
if len(nodeID.replace('-', '')) < UUID_LENGTH:
# Some platforms (e.g. Mac) do not give us enough actual hex characters.
# Repeat them so the result is convertible to a uuid.UUID
nodeID = nodeID.replace('-', '')
num_repeats = UUID_LENGTH // len(nodeID) + 1
nodeID = nodeID * num_repeats
nodeID = nodeID[:UUID_LENGTH]
return nodeID
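# Hedged worked example of the padding logic above (values invented): if a platform
# returned the short ID 'ab-cd' and UUID_LENGTH were 8, the dashes are stripped
# ('abcd', 4 hex characters), the string is repeated 8 // 4 + 1 = 3 times
# ('abcdabcdabcd') and truncated to 8 characters ('abcdabcd'), which is then long
# enough to build a uuid.UUID from.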
class Toil:
"""
A context manager that represents a Toil workflow, specifically the batch system, job store,
and its configuration.
"""
def __init__(self, options):
"""
Initialize a Toil object from the given options. Note that this is very light-weight and
that the bulk of the work is done when the context is entered.
:param argparse.Namespace options: command line options specified by the user
"""
super(Toil, self).__init__()
self.options = options
self.config = None
"""
:type: toil.common.Config
"""
self._jobStore = None
"""
:type: toil.jobStores.abstractJobStore.AbstractJobStore
"""
self._batchSystem = None
"""
:type: toil.batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
self._provisioner = None
"""
:type: toil.provisioners.abstractProvisioner.AbstractProvisioner
"""
self._jobCache = dict()
self._inContextManager = False
self._inRestart = False
def __enter__(self):
"""
Derive configuration from the command line options, load the job store and, on restart,
consolidate the derived configuration with the one from the previous invocation of the
workflow.
"""
set_logging_from_options(self.options)
config = Config()
config.setOptions(self.options)
jobStore = self.getJobStore(config.jobStore)
if not config.restart:
config.workflowAttemptNumber = 0
jobStore.initialize(config)
else:
jobStore.resume()
# Merge configuration from job store with command line options
config = jobStore.config
config.setOptions(self.options)
config.workflowAttemptNumber += 1
jobStore.writeConfig()
self.config = config
self._jobStore = jobStore
self._inContextManager = True
return self
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Clean up after a workflow invocation. Depending on the configuration, delete the job store.
"""
try:
if (exc_type is not None and self.config.clean == "onError" or
exc_type is None and self.config.clean == "onSuccess" or
self.config.clean == "always"):
try:
if self.config.restart and not self._inRestart:
pass
else:
self._jobStore.destroy()
logger.info("Successfully deleted the job store: %s" % str(self._jobStore))
except:
logger.info("Failed to delete the job store: %s" % str(self._jobStore))
raise
except Exception as e:
if exc_type is None:
raise
else:
logger.exception('The following error was raised during clean up:')
self._inContextManager = False
self._inRestart = False
return False # let exceptions through
def start(self, rootJob):
"""
Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value
"""
self._assertContextManagerUsed()
self.writePIDFile()
if self.config.restart:
raise ToilRestartException('A Toil workflow can only be started once. Use '
'Toil.restart() to resume it.')
self._batchSystem = self.createBatchSystem(self.config)
self._setupAutoDeployment(rootJob.getUserScript())
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
# Pickle the promised return value of the root job, then write the pickled promise to
# a shared file, where we can find and unpickle it at the end of the workflow.
# Unpickling the promise will automatically substitute the promise for the actual
# return value.
with self._jobStore.writeSharedFileStream('rootJobReturnValue') as fH:
rootJob.prepareForPromiseRegistration(self._jobStore)
promise = rootJob.rv()
pickle.dump(promise, fH, protocol=pickle.HIGHEST_PROTOCOL)
# Setup the first JobDescription and cache it
rootJobDescription = rootJob.saveAsRootJob(self._jobStore)
self._cacheJob(rootJobDescription)
self._setProvisioner()
return self._runMainLoop(rootJobDescription)
finally:
self._shutdownBatchSystem()
def restart(self):
"""
Restarts a workflow that has been interrupted.
:return: The root job's return value
"""
self._inRestart = True
self._assertContextManagerUsed()
self.writePIDFile()
if not self.config.restart:
raise ToilRestartException('A Toil workflow must be initiated with Toil.start(), '
'not restart().')
from toil.job import JobException
try:
self._jobStore.loadRootJob()
except JobException:
logger.warning(
'Requested restart but the workflow has already been completed; allowing exports to rerun.')
return self._jobStore.getRootJobReturnValue()
self._batchSystem = self.createBatchSystem(self.config)
self._setupAutoDeployment()
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
self._setProvisioner()
rootJobDescription = self._jobStore.clean(jobCache=self._jobCache)
return self._runMainLoop(rootJobDescription)
finally:
self._shutdownBatchSystem()
def _setProvisioner(self):
if self.config.provisioner is None:
self._provisioner = None
else:
self._provisioner = cluster_factory(provisioner=self.config.provisioner,
clusterName=None,
zone=None, # read from instance meta-data
nodeStorage=self.config.nodeStorage,
nodeStorageOverrides=self.config.nodeStorageOverrides,
sseKey=self.config.sseKey)
self._provisioner.setAutoscaledNodeTypes(self.config.nodeTypes)
@classmethod
def getJobStore(cls, locator):
"""
Create an instance of the concrete job store implementation that matches the given locator.
:param str locator: The location of the job store to be represent by the instance
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: toil.jobStores.abstractJobStore.AbstractJobStore
"""
name, rest = cls.parseLocator(locator)
if name == 'file':
from toil.jobStores.fileJobStore import FileJobStore
return FileJobStore(rest)
elif name == 'aws':
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore(rest)
elif name == 'google':
from toil.jobStores.googleJobStore import GoogleJobStore
return GoogleJobStore(rest)
else:
raise RuntimeError("Unknown job store implementation '%s'" % name)
@staticmethod
def parseLocator(locator):
if locator[0] in '/.' or ':' not in locator:
return 'file', locator
else:
try:
name, rest = locator.split(':', 1)
except ValueError:
raise RuntimeError('Invalid job store locator syntax.')
else:
return name, rest
@staticmethod
def buildLocator(name, rest):
assert ':' not in name
return f'{name}:{rest}'
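# Hedged, illustrative transcript for the two helpers above (the locators are
# invented, not taken from the Toil documentation):
#
#   >>> Toil.parseLocator('/tmp/jobstore')
#   ('file', '/tmp/jobstore')
#   >>> Toil.parseLocator('aws:us-west-2:my-jobstore')
#   ('aws', 'us-west-2:my-jobstore')
#   >>> Toil.buildLocator('aws', 'us-west-2:my-jobstore')
#   'aws:us-west-2:my-jobstore'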
@classmethod
def resumeJobStore(cls, locator):
jobStore = cls.getJobStore(locator)
jobStore.resume()
return jobStore
@staticmethod
def createBatchSystem(config):
"""
Creates an instance of the batch system specified in the given config.
:param toil.common.Config config: the current configuration
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
:return: an instance of a concrete subclass of AbstractBatchSystem
"""
kwargs = dict(config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk)
from toil.batchSystems.registry import BATCH_SYSTEM_FACTORY_REGISTRY
try:
batch_system = BATCH_SYSTEM_FACTORY_REGISTRY[config.batchSystem]()
except:
raise RuntimeError(f'Unrecognized batch system: {config.batchSystem}')
if not config.disableCaching and not batch_system.supportsWorkerCleanup():
raise RuntimeError(f'{config.batchSystem} currently does not support shared caching, because it '
'does not support cleaning up a worker after the last job '
'finishes. Set the --disableCaching flag if you want to '
'use this batch system.')
logger.debug('Using the %s' % re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", batch_system.__name__).lower())
return batch_system(**kwargs)
def _setupAutoDeployment(self, userScript=None):
"""
Determine the user script, save it to the job store and inject a reference to the saved
copy into the batch system such that it can auto-deploy the resource on the worker
nodes.
:param toil.resource.ModuleDescriptor userScript: the module descriptor referencing the
user script. If None, it will be looked up in the job store.
"""
if userScript is not None:
# This branch is hit when a workflow is being started
if userScript.belongsToToil:
logger.debug('User script %s belongs to Toil. No need to auto-deploy it.', userScript)
userScript = None
else:
if (self._batchSystem.supportsAutoDeployment() and
not self.config.disableAutoDeployment):
# Note that by saving the ModuleDescriptor, and not the Resource we allow for
# redeploying a potentially modified user script on workflow restarts.
with self._jobStore.writeSharedFileStream('userScript') as f:
pickle.dump(userScript, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
from toil.batchSystems.singleMachine import \
SingleMachineBatchSystem
if not isinstance(self._batchSystem, SingleMachineBatchSystem):
logger.warning('Batch system does not support auto-deployment. The user '
'script %s will have to be present at the same location on '
'every worker.', userScript)
userScript = None
else:
# This branch is hit on restarts
if (self._batchSystem.supportsAutoDeployment() and
not self.config.disableAutoDeployment):
# We could deploy a user script
from toil.jobStores.abstractJobStore import NoSuchFileException
try:
with self._jobStore.readSharedFileStream('userScript') as f:
userScript = safeUnpickleFromStream(f)
except NoSuchFileException:
logger.debug('User script neither set explicitly nor present in the job store.')
userScript = None
if userScript is None:
logger.debug('No user script to auto-deploy.')
else:
logger.debug('Saving user script %s as a resource', userScript)
userScriptResource = userScript.saveAsResourceTo(self._jobStore)
logger.debug('Injecting user script %s into batch system.', userScriptResource)
self._batchSystem.setUserScript(userScriptResource)
def importFile(self, srcUrl, sharedFileName=None, symlink=False):
"""
Imports the file at the given URL into job store.
See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.importFile` for a
full description
"""
self._assertContextManagerUsed()
return self._jobStore.importFile(srcUrl, sharedFileName=sharedFileName, symlink=symlink)
def exportFile(self, jobStoreFileID, dstUrl):
"""
Exports file to destination pointed at by the destination URL.
See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.exportFile` for a
full description
"""
self._assertContextManagerUsed()
self._jobStore.exportFile(jobStoreFileID, dstUrl)
def _setBatchSystemEnvVars(self):
"""
Sets the environment variables required by the job store and those passed on command line.
"""
for envDict in (self._jobStore.getEnv(), self.config.environment):
for k, v in envDict.items():
self._batchSystem.setEnv(k, v)
def _serialiseEnv(self):
"""
Puts the environment in a globally accessible pickle file.
"""
# Dump out the environment of this process in the environment pickle file.
with self._jobStore.writeSharedFileStream("environment.pickle") as fileHandle:
pickle.dump(dict(os.environ), fileHandle, pickle.HIGHEST_PROTOCOL)
logger.debug("Written the environment for the jobs to the environment file")
def _cacheAllJobs(self):
"""
Downloads all jobs in the current job store into self.jobCache.
_execute_without_auto_scaling(
parallel_path,
function,
'parallel',
)
logger.info(
'*************** Finished parallel test cases ***************\n'
)
def _execute_function(function, result_dir, func_dep=None):
if func_dep:
func_dep = _get_dependency_function(func_dep)
# Deploy function
_deploy_function(
func_dep['yaml_path'],
env=func_dep.get('environment')
)
endpoint = _get_function_endpoint(func_dep)
_wait_function_status_code(
endpoint['endpoint'],
endpoint['http_method'],
stop_status=[200],
check_status=[404],
data=func_dep.get('data')
)
logger.info('Start executing function {}'.format(function['name']))
# This is the result directory where the result will be dumped
function_result_path = os.path.join(result_dir, function['name'])
# Create the function result directory
_creat_dir(function_result_path)
_execute_sequential(function_result_path, function)
_execute_parallel(function_result_path, function)
def execute_experiment():
result_dir = _get_experiment_config()['result_dir']
# Create the main directory which holds all function results
_creat_dir(result_dir)
functions = config['functions']
func_dep = None
for func in functions:
if func.get('depends_on'):
func_dep = func['depends_on']
_execute_function(func, result_dir, func_dep=func_dep)
logger.info('Wait 2 minutes before calling next function')
time.sleep(120)
def _validate_python_version():
if sys.version[0] != '3':
raise Exception('Python 3 not installed !!!')
def _validate_command(command):
try:
_execute_command(command, ignore_log=True)
except Exception:
msg = 'Failed to run {0}, make sure it is installed'.format(command)
logger.error(msg)
raise Exception(msg)
def _validate_jmeter():
_validate_command('which jmeter')
def _validate_faas_cli():
_validate_command('which faas-cli')
def _validate_zip():
_validate_command('which zip')
def _validate_environment_variables(framework):
if framework == 'k8s':
if not os.environ.get('KUBECONFIG'):
raise Exception('KUBECONFIG variable is not set')
if not os.environ.get('OPENFAAS_URL'):
raise Exception('OPENFAAS_URL variable is not set')
def _is_warm_function(function_name):
return True if function_name == 'warmfunction' else False
# Use the same method as the one created in the jMetal project https://github.com/jMetal/jMetalPy/blob/6f54940cb205df831f5498e2eac2520b331ee4fd/jmetal/lab/experiment.py#L484 #NOQA
def _wilcoxon_to_latex(df, caption, label, minimization=True, alignment='c'):
""" Convert a pandas DataFrame to a LaTeX tabular.
Prints labels in bold and uses math mode.
:param df: Pandas dataframe.
:param caption: LaTeX table caption.
:param label: LaTeX table label.
:param minimization: If indicator is minimization,
highlight the best values of mean/median; else, the lowest.
"""
num_columns, num_rows = df.shape[1], df.shape[0]
output = io.StringIO()
col_format = '{}|{}'.format(alignment, alignment * num_columns)
column_labels = ['\\textbf{{{0}}}' \
''.format(label.replace('_', '\\_'))
for label in df.columns]
# Write header
output.write('\\documentclass{article}\n')
output.write('\\usepackage[utf8]{inputenc}\n')
output.write('\\usepackage{tabularx}\n')
output.write('\\usepackage{amssymb}\n')
output.write('\\usepackage{amsmath}\n')
output.write('\\title{Wilcoxon - Mann-Whitney rank sum test}\n')
output.write('\\author{}\n')
output.write('\\begin{document}\n')
output.write('\\maketitle\n')
output.write('\\section{Table}\n')
output.write('\\begin{table}[!htp]\n')
output.write(' \\caption{{{}}}\n'.format(caption))
output.write(' \\label{{{}}}\n'.format(label))
output.write(' \\centering\n')
output.write(' \\begin{scriptsize}\n')
output.write(' \\begin{tabular}{%s}\n' % col_format)
output.write(' & {} \\\\\\hline\n'.format(' & '.join(column_labels)))
symbolo = '\\triangledown\ '
symbolplus = '\\blacktriangle\ '
if not minimization:
symbolo, symbolplus = symbolplus, symbolo
# Write data lines
for i in range(num_rows):
values = [val.replace('-', '\\text{--}\ ').replace('o', symbolo).replace('+', symbolplus) for val in df.iloc[i]]
output.write(' \\textbf{{{0}}} & ${1}$ \\\\\n'.format(
df.index[i], ' $ & $ '.join([str(val) for val in values]))
)
# Write footer
output.write(' \\end{tabular}\n')
output.write(' \\end{scriptsize}\n')
output.write('\\end{table}\n')
output.write('\\end{document}')
return output.getvalue()
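# Hedged usage sketch (the framework names and effect strings below are invented):
# rendering a small effect table to a standalone LaTeX document.
if __name__ == '__main__':
    import pandas as pd
    _table = pd.DataFrame({'frameworkB': ['++o'], 'frameworkC': ['-+o']},
                          index=['frameworkA'])
    print(_wilcoxon_to_latex(_table, caption='Example Wilcoxon table',
                             label='table:example'))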
# Use the same method as the one created in the jMetal project https://github.com/jMetal/jMetalPy/blob/6f54940cb205df831f5498e2eac2520b331ee4fd/jmetal/lab/experiment.py#L545 #NOQA
def _check_minimization(factor):
if factor in ['resTime', 'errorPct']:
return True
else:
return False
def _update_dataframe_for_warm_cases(filename):
df = pd.read_csv(filename, skipinitialspace=True)
data = []
for framework in FRAMEWORKS:
for factor in SAMPLE:
for warm_case in ['warm', 'cold']:
df1 = df[
(df["framework"] == framework)
& (df["startTime"].str.contains(warm_case))
& (df["factor"] == factor)
]
# Aggregate All warm use cases
df1['startTime'] = warm_case
data.append(df1)
df = pd.concat(data)
return df
def _dump_latext_and_csv_per_factor(factor, table, output_dir):
table.to_csv(os.path.join(
output_dir, 'Wilcoxon-{}.csv'.format(factor)),
sep='\t', encoding='utf-8'
)
with open(os.path.join(
output_dir, 'Wilcoxon-{}.tex'.format(factor)),
'w') as latex:
latex.write(
_wilcoxon_to_latex(
table,
caption='Wilcoxon values of the'
' {} factor'
''.format(factor),
label='table:{}'.format(factor)
)
)
def _apply_wilcoxon_cal_on_data(factor, df1, df2):
data1 = df1["factorValue"]
data2 = df2["factorValue"]
median1 = median(data1)
median2 = median(data2)
if median1 == median2:
effect = '-'
else:
stat, p = mannwhitneyu(data1, data2)
if p <= 0.05:
if _check_minimization(factor):
if median1 <= median2:
effect = '+'
else:
effect = 'o'
else:
if median1 >= median2:
effect = '+'
else:
effect = 'o'
else:
effect = '-'
return effect
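# Hedged illustration (data invented): for a minimization factor such as 'resTime',
# clearly separated samples with a lower median in the first group yield '+', a
# significantly higher median yields 'o', and equal medians yield '-'.
if __name__ == '__main__':
    import pandas as pd
    _fast = pd.DataFrame({'factorValue': [9, 10, 10, 11, 12]})
    _slow = pd.DataFrame({'factorValue': [19, 20, 21, 22, 23]})
    print(_apply_wilcoxon_cal_on_data('resTime', _fast, _slow))  # expected: '+'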
def _calculate_p_values_for_all_frameworks(
factor,
table,
frameworks,
case_numbers,
df
):
for i, row_framework in enumerate(frameworks[0:-1]):
wilcoxon = []
for j, col_framework in enumerate(frameworks[1:]):
line = []
if i <= j:
for case_number in case_numbers:
df1 = df[(df["framework"] == row_framework)
& (df["factor"] == factor)
& (df["caseNumber"] == case_number)]
df2 = df[(df["framework"] == col_framework)
& (df["factor"] == factor)
& (df["caseNumber"] == case_number)]
effect = _apply_wilcoxon_cal_on_data(
factor,
df1,
df2
)
line.append(effect)
wilcoxon.append(''.join(line))
if len(wilcoxon) < len(frameworks):
wilcoxon = [''] * (
len(frameworks) - len(wilcoxon) - 1) + wilcoxon
table.loc[row_framework] = wilcoxon
def _calculate_p_values_for_warm_cases(
warm_cases,
factor,
table,
frameworks,
case_numbers,
df):
for warm_case in warm_cases:
wilcoxon = []
line = []
for case_number in case_numbers:
df1 = df[(df["framework"] == frameworks[0])
& (df["factor"] == factor)
& (df["startTime"] == warm_case)
& (df["caseNumber"] == case_number)]
df2 = df[(df["framework"] == frameworks[1])
& (df["factor"] == factor)
& (df["startTime"] == warm_case)
& (df["caseNumber"] == case_number)]
effect = _apply_wilcoxon_cal_on_data(factor, df1, df2)
line.append(effect)
wilcoxon.append(''.join(line))
if len(wilcoxon) < len(frameworks):
wilcoxon = [''] * (
len(frameworks) - len(wilcoxon) - 1) + wilcoxon
table.loc[warm_case] = wilcoxon
# Inspired from the jMetal Project Source code
# https://github.com/jMetal/jMetalPy/blob/6f54940cb205df831f5498e2eac2520b331ee4fd/jmetal/lab/experiment.py#L295 #NOQA
def _compute_wilcoxon(function, filename, output_dir):
"""
:param filename: Input filename (summary).
:param output_dir: Output path.
"""
df = pd.read_csv(filename, skipinitialspace=True)
warm_cases = []
is_warm = False
if _is_warm_function(function):
df = _update_dataframe_for_warm_cases(filename)
warm_cases = pd.unique(df['startTime'])
is_warm = True
frameworks = pd.unique(df['framework'])
factors = pd.unique(df['factor'])
case_numbers = pd.unique(df['caseNumber'])
if is_warm:
table = pd.DataFrame(index=warm_cases, columns=['p_value'])
else:
table = pd.DataFrame(index=frameworks[0:-1], columns=frameworks[1:])
for factor in factors:
if is_warm:
_calculate_p_values_for_warm_cases(
warm_cases,
factor,
table,
frameworks,
case_numbers,
df
)
else:
_calculate_p_values_for_all_frameworks(
factor,
table,
frameworks,
case_numbers,
df
)
_dump_latext_and_csv_per_factor(factor, table, output_dir)
def _calculate_throughput(start_date, end_date, status_codes_200):
# the start & end dates passed in are millisecond unix timestamps
# which need to be converted
seconds_dt1, millisecond_dt1 = divmod(int(start_date), 1000)
seconds_dt2, millisecond_dt2 = divmod(int(end_date), 1000)
start_date = '{0}.{1}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds_dt1)),
millisecond_dt1
)
end_date = '{0}.{1}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds_dt2)),
millisecond_dt2
)
start_date = datetime.datetime.strptime(
start_date, '%Y-%m-%d %H:%M:%S.%f'
)
end_date = datetime.datetime.strptime(
end_date, '%Y-%m-%d %H:%M:%S.%f'
)
diff = end_date - start_date
total_time = diff.seconds + diff.microseconds / 1000000
throughput = float(int(status_codes_200) / total_time)
return throughput
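# Hedged worked example (timestamps invented): a 10 second window
# (1600000000000 ms .. 1600000010000 ms) with 50 successful responses gives
# 50 / 10.0 == 5.0 requests per second.
if __name__ == '__main__':
    print(_calculate_throughput('1600000000000', '1600000010000', 50))  # -> 5.0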
def _aggregate_warm_data_from_tests_cases(
path,
dir_case_path,
warm_cases,
run_index,
):
for warm_case, metrics in warm_cases.items():
_index = '{0}/{1}'.format(run_index, warm_case)
_item_to_add = warm_case.split('_')[0]
_aggregate_summaries_and_statistic(
path,
dir_case_path,
metrics,
_index
)
def _read_error_pct_from_statistic(target_statistic_path):
with open(target_statistic_path) as stat_path:
result = json.load(stat_path)
total_result = result['Total']['errorPct']
return total_result
def _read_from_summary_file(target_summary_path):
status_code_200 = 0
start_date = ''
end_date = ''
restime_list = []
with open(target_summary_path) as csv_file:
data = pd.read_csv(csv_file)
last_entry = len(data) - 1
for index, entry in data.iterrows():
if index == 0:
start_date = int(entry['timeStamp'])
elif index == last_entry:
end_date = int(entry['timeStamp'])
num = int(entry['elapsed'])
response_code = entry['responseCode']
if not isinstance(response_code, int):
if response_code.isnumeric():
response_code = int(response_code)
if response_code == 200:
status_code_200 += 1
restime_list.append(num)
summary_result = {
'2x_response_times': restime_list,
'start_date': start_date,
'end_date': end_date,
'status_code_200': status_code_200
}
return summary_result
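# Hedged sketch of the JMeter summary CSV shape this parser expects (column names
# taken from the code above; the rows are invented):
#
#   timeStamp,elapsed,responseCode
#   1600000000000,120,200
#   1600000000150,95,200
#   1600000000300,480,500
#
# For that input the function reports start_date=1600000000000,
# end_date=1600000000300, status_code_200=2 and 2x_response_times=[120, 95, 480].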
def _get_summary_and_statistic_per_run(path, dir_case_path, run_num):
# The target file
path_to_summary = path['summary'].format(index=run_num)
path_to_statistic = path['statistic'].format(index=run_num)
target_summary_path = os.path.join(
dir_case_path, path_to_summary
)
target_statistic_path = os.path.join(
dir_case_path, path_to_statistic
)
return target_summary_path, target_statistic_path
def _aggregate_summaries_and_statistic(
path,
dir_case_path,
metrics,
run_num
):
target_summary_path, target_statistic_path = \
_get_summary_and_statistic_per_run(
path, dir_case_path, run_num
)
metrics['errorPct'].append(_read_error_pct_from_statistic(
target_statistic_path
))
if 'resTime' not in metrics:
metrics['resTime'] = []
summary_result = _read_from_summary_file(target_summary_path)
metrics['resTime'].append(summary_result['2x_response_times'])
throughput = _calculate_throughput(
summary_result['start_date'],
summary_result['end_date'],
summary_result['status_code_200']
)
logger.info('Test Case path file {0}'.format(target_summary_path))
metrics['throughput'].append(throughput)
def _parse_test_cases_results(
function,
path,
dir_case_path,
headers,
):
warm_cases = {
'warm_0': {
'errorPct': [],
'throughput': []
},
'cold_0': {
'errorPct': [],
'throughput': []
},
'warm_1': {
'errorPct': [],
'throughput': []
},
'cold_1': {
'errorPct': [],
'throughput': []
},
'warm_2': {
'errorPct': [],
'throughput': []
},
'cold_2': {
'errorPct': [],
'throughput': []
},
}
headers = headers or []
metrics = {
'errorPct': [],
'throughput': []
}
for index in range(6):
run_num = index + 1
if _is_warm_function(function):
_aggregate_warm_data_from_tests_cases(
path,
dir_case_path,
warm_cases,
run_num,
)
else:
logger.info('This is the run # {}'.format(run_num))
# Aggregate summaries and statistic
_aggregate_summaries_and_statistic(
path,
dir_case_path,
metrics,
run_num
)
# Check if this is a warm function or not
if _is_warm_function(function):
for warm_case, warm_metrics in warm_cases.items():
# Get the median result for response time
response_time_data = _remove_outliers_from_dataset(
warm_metrics['resTime']
)
median_result = median(response_time_data)
# Get the median result for throughput
throughput_data = _remove_outliers_from_dataset(
warm_metrics['throughput']
)
throughput_result = median(throughput_data)
error_pct_result = median(warm_metrics['errorPct'])
headers.append([
warm_case,
median_result,
throughput_result,
error_pct_result
])
else:
# Get the median result for response time
response_time_data = _remove_outliers_from_dataset(metrics['resTime'])
median_result = median(response_time_data)
# Get the median result for throughput
throughput_data = _remove_outliers_from_dataset(metrics['throughput'])
throughput_result = median(throughput_data)
error_pct_result = median(metrics['errorPct'])
headers.append([median_result, throughput_result, error_pct_result])
def _calculate_results(function,
case,
dir_case_path,
dir_to_create):
for path in case.get('paths'):
pre_header = ()
if _is_warm_function(function):
pre_header += ('startTime',)
headers = [pre_header + SAMPLE]
csv.register_dialect('path_dialect',
quoting=csv.QUOTE_NONNUMERIC,
skipinitialspace=True)
_parse_test_cases_results(
function,
path,
dir_case_path,
headers,
)
case_id = path['summary'].split('/')[0]
function_result = os.path.join(dir_to_create, case_id)
with open('{0}.csv'.format(function_result),
import copy
import itertools
import logging
import os
import tempfile
import gzip
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from kipoi_veff import BedOverlappingRg, SnvCenteredRg, ensure_tabixed_vcf
from kipoi_veff.scores import Logit, get_scoring_fns
from kipoi_veff.utils import select_from_dl_batch, OutputReshaper, default_vcf_id_gen, \
ModelInfoExtractor, BedWriter, VariantLocalisation
from kipoi_utils.utils import cd
from .snv_predict import SampleCounter, get_genomicranges_line, merge_intervals, get_variants_in_regions_search_vcf, \
get_variants_in_regions_sequential_vcf, analyse_model_preds, _overlap_vcf_region
from .utils import is_indel_wrapper
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def _generate_records_for_all_regions(regions, ref_seq):
"""
Generate records for all positions covered by the region
"""
from vcf.model import _Record, _Substitution
assert isinstance(regions["chr"], list) or isinstance(regions["chr"], np.ndarray)
assert isinstance(ref_seq, list)
assert len(regions["chr"]) == len(ref_seq)
contained_regions = []
vcf_records = []
for i, ref_seq_here in enumerate(ref_seq):
chrom, start, end = regions["chr"][i], regions["start"][i] + 1, regions["end"][i]
for pos, ref in zip(range(start, end + 1), ref_seq_here.upper()):
qual = 0
filt = []
info = {}
fmt = None
sample_indexes = None
for alt in ["A", "C", "G", "T"]:
# skip REF/REF variants - they should always be 0 anyways.
if ref == alt:
continue
ID = ":".join([chrom, str(pos), ref, alt])
record = _Record(chrom, pos, ID, ref, [_Substitution(alt)], qual, filt, info, fmt,
sample_indexes)
vcf_records.append(record)
contained_regions.append(i)
#
return vcf_records, contained_regions
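# Hedged worked example (coordinates invented): for a single 0-based region
# chr1:100-102 with reference sequence 'AC', the loop above walks 1-based
# positions 101 and 102 and emits one record per non-reference base, i.e.
# 2 positions x 3 alternative bases = 6 _Record objects with IDs such as
# 'chr1:101:A:C', and contained_regions == [0, 0, 0, 0, 0, 0].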
def get_variants_for_all_positions(dl_batch, seq_to_meta, ref_sequences, process_lines_preselection=None):
"""
Function that generates VCF records for all positions in the input sequence(s). This is done by merging the regions.
When regions are partly overlapping, a variant will be tagged with all sequence fields that participated in the
merged region, hence not all input regions might be affected by the variant.
"""
vcf_records = [] # list of vcf records to use
process_lines = [] # sample id within batch
process_seq_fields = [] # sequence fields that should be mutated
#
meta_to_seq = {v: [k for k in seq_to_meta if seq_to_meta[k] == v] for v in seq_to_meta.values()}
all_meta_fields = list(set(seq_to_meta.values()))
#
if process_lines_preselection is None:
num_samples_in_batch = len(dl_batch['metadata'][all_meta_fields[0]]["chr"])
process_lines_preselection = range(num_samples_in_batch)
#
# If we should search for the overlapping VCF lines - for every sample collect all region objects
# under the assumption that all generated sequences have the same number of samples in a batch:
for line_id in process_lines_preselection:
# check if there is more than one metadata_field that is used:
if len(all_meta_fields) > 1:
# one region per meta_field
regions_by_meta = {k: get_genomicranges_line(dl_batch['metadata'][k], line_id)
for k in all_meta_fields}
sequence = {k: ref_sequences[k][line_id] for k in ref_sequences}
# regions_unif: union across all regions. meta_field_unif_r: meta_fields, has the length of regions_unif
regions_unif, meta_field_unif_r = merge_intervals(regions_by_meta)
# get list of merged reference sequences as defined by regions_unif.
merged_seq = merged_intervals_seq(regions_by_meta, sequence, regions_unif, meta_field_unif_r)
else:
# Only one meta_field and only one line hence:
meta_field_unif_r = [all_meta_fields]
# get respective reference sequence for the DL batch:
merged_seq = [ref_sequences[all_meta_fields[0]][line_id]]
# Only one region:
regions_unif = get_genomicranges_line(dl_batch['metadata'][all_meta_fields[0]], line_id)
#
vcf_records_here, process_lines_rel = _generate_records_for_all_regions(regions_unif, merged_seq)
#
for rec, sub_line_id in zip(vcf_records_here, process_lines_rel):
vcf_records.append(rec)
process_lines.append(line_id)
metas = []
for f in meta_field_unif_r[sub_line_id]:
metas += meta_to_seq[f]
process_seq_fields.append(metas)
return vcf_records, process_lines, process_seq_fields
def merged_intervals_seq(ranges_dict, sequence, regions_unif, meta_field_unif_r):
"""
ranges_dict: Dict of Genomic ranges objects for all the different metadata slots
sequence: Sequences stored in a dictionary with metadata slots as keys
regions_unif: A genomic ranges object of the unified ranges
meta_field_unif_r: Metadata slot names that are associated with a certain region in `regions_unif`
seq_to_meta: sequence slot keys to their associated metadata slot key
"""
all_joint_seqs = []
for reg_i, mf in enumerate(meta_field_unif_r):
reg_start = regions_unif["start"][reg_i]
reg_len = regions_unif["end"][reg_i] - reg_start
joint_seq = np.empty(reg_len, dtype=str)
joint_seq[:] = ""
for mf_here in mf:
rel_start, rel_end = ranges_dict[mf_here]["start"][0] - reg_start, ranges_dict[mf_here]["end"][
0] - reg_start
# When generating the merged sequence make sure the overlapping parts of the sequence match up!
if np.any(joint_seq[rel_start:rel_end] != ""):
assert all([a == b for a, b in zip(joint_seq[rel_start:rel_end], sequence[mf_here]) if a != ""])
joint_seq[rel_start:rel_end] = list(sequence[mf_here])
all_joint_seqs.append("".join(joint_seq.tolist()))
return all_joint_seqs
def _overlap_bedtools_region(bedtools_obj, regions):
"""
Overlap a tabix-indexed bed file with regions generated by the dataloader.
The region definition is assumed to be 0-based, hence it is converted to 1-based for tabix overlaps!
Returns bed regions
"""
assert isinstance(regions["chr"], list) or isinstance(regions["chr"], np.ndarray)
contained_regions = []
bed_regions = []
for i in range(len(regions["chr"])):
chrom, start, end = regions["chr"][i], regions["start"][i] + 1, regions["end"][i]
region_str = "{0}:{1}-{2}".format(chrom, start, end)
bf_regions = bedtools_obj.tabix_intervals(region_str)
for region in bf_regions:
bed_regions.append(region)
contained_regions.append(i)
#
return bed_regions, contained_regions
def compress_genomicranges_list(input_list):
"""Convert list of genomicranges objects to a single genomicranges object."""
# TODO - directly use kipoi.metadata.GenomicRanges.collate(input_list) instead of this function
assert isinstance(input_list, list)
out_regions = {k: [] for k in ["chr", "start", "end", "strand"]}
[[out_regions[k].append(v[k][0]) for k in out_regions] for v in input_list]
return out_regions
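# Hedged illustration (coordinates invented): given
#   a = {"chr": ["chr1"], "start": [100], "end": [200], "strand": ["+"]}
#   b = {"chr": ["chr2"], "start": [50],  "end": [80],  "strand": ["-"]}
# compress_genomicranges_list([a, b]) returns
#   {"chr": ["chr1", "chr2"], "start": [100, 50], "end": [200, 80],
#    "strand": ["+", "-"]}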
def get_overlapping_bed_regions(dl_batch, seq_to_meta, bedtools_obj):
"""
Function that overlaps metadata ranges with a bed file.
Regions are not merged prior to overlap with bedtools_obj!
Arguments:
dl_batch: batch coming from the dataloader
seq_to_meta: dictionary that converts model input names to its associated metadata field.
bedtools_obj: Tabixed bedtools object of the regions that should be investigated.
"""
bed_regions = [] # list of vcf records to use
process_lines = [] # sample id within batch
process_seq_fields = [] # sequence fields that should be mutated
#
meta_to_seq = {v: [k for k in seq_to_meta if seq_to_meta[k] == v] for v in seq_to_meta.values()}
all_meta_fields = list(set(seq_to_meta.values()))
#
num_samples_in_batch = len(dl_batch['metadata'][all_meta_fields[0]]["chr"])
#
# If we should search for the overlapping VCF lines - for every sample collect all region objects
# under the assumption that all generated sequences have the same number of samples in a batch:
for line_id in range(num_samples_in_batch):
# check if there is more than one metadata_field that is used:
if len(all_meta_fields) > 1:
# As opposed to the VCF handling, don't merge intervals here... If two metadata fields are overlapping,
# then they will be tested independently.
regions_unif_list = [get_genomicranges_line(dl_batch['metadata'][mf], line_id) for mf in all_meta_fields]
regions_unif = compress_genomicranges_list(regions_unif_list)
meta_field_unif_r = [[v] for v in all_meta_fields]
else:
# Only one meta_field and only one line hence:
meta_field_unif_r = [all_meta_fields]
# Only one region:
regions_unif = get_genomicranges_line(dl_batch['metadata'][all_meta_fields[0]], line_id)
#
bed_regions_here, process_lines_rel = _overlap_bedtools_region(bedtools_obj, regions_unif)
#
for reg, sub_line_id in zip(bed_regions_here, process_lines_rel):
bed_regions.append(reg)
process_lines.append(line_id)
metas = []
for f in meta_field_unif_r[sub_line_id]:
metas += meta_to_seq[f]
process_seq_fields.append(metas)
return bed_regions, process_lines, process_seq_fields
def _generate_seq_sets_mutmap_iter(dl_ouput_schema, dl_batch, seq_to_mut, seq_to_meta,
sample_counter, ref_sequences, bedtools_obj=None, vcf_fh=None,
vcf_id_generator_fn=None, vcf_search_regions=False, generate_rc=True,
batch_size=32, bed_id_conv_fh=None):
all_meta_fields = list(set(seq_to_meta.values()))
num_samples_in_batch = len(dl_batch['metadata'][all_meta_fields[0]]["chr"])
metadata_ids = sample_counter.get_ids(num_samples_in_batch)
if "_id" in dl_batch['metadata']:
metadata_ids = dl_batch['metadata']['id']
assert num_samples_in_batch == len(metadata_ids)
# now get the right region from the vcf:
# list of vcf records to use: vcf_records
process_ids = None # id from genomic ranges metadata: process_lines
# sample id within batch: process_lines
# sequence fields that should be mutated: process_seq_fields
query_bed_regions = None
query_vcf_records = None
if vcf_fh is not None:
# This is from the variant effect prediction function - use this to annotate the output in the end...
if vcf_search_regions:
query_vcf_records, query_process_lines, query_process_seq_fields = \
get_variants_in_regions_search_vcf(dl_batch, seq_to_meta, vcf_fh)
else:
# vcf_search_regions == False means: rely completely on the variant id
# so for every sample assert that all metadata ranges ids agree and then find the entry.
query_vcf_records, query_process_lines, query_process_seq_fields, query_process_ids = \
get_variants_in_regions_sequential_vcf(dl_batch, seq_to_meta, vcf_fh, vcf_id_generator_fn,
bed_id_conv_fh)
elif bedtools_obj is not None:
query_bed_regions, query_process_lines, query_process_seq_fields = \
get_overlapping_bed_regions(dl_batch, seq_to_meta, bedtools_obj)
else:
# No restrictions are given so process all input lines
query_process_lines = list(range(num_samples_in_batch))
# Now generate fake variants for all bases on all positions
# only the "query_process_lines" selected above should be considered!
vcf_records, process_lines, process_seq_fields = \
get_variants_for_all_positions(dl_batch, seq_to_meta, ref_sequences, query_process_lines)
# short-cut if no sequences are left
if len(process_lines) == 0:
raise StopIteration
if process_ids is None:
process_ids = []
for line_id in process_lines:
process_ids.append(metadata_ids[line_id])
# Generate a batched output
real_batch_size = batch_size // 2
if generate_rc:
real_batch_size = real_batch_size // 2
if real_batch_size == 0:
logger.warn("Batch size too small, resetting it to %d." % (2 + int(generate_rc) * 2))
real_batch_size = 1
n_batches = len(process_lines) // real_batch_size
if len(process_lines) % real_batch_size != 0:
n_batches += 1
for batch_i in range(n_batches):
bs, be = batch_i * real_batch_size, min(((batch_i + 1) *
#!/usr/bin/env python
import re
from collections import namedtuple
from collections import OrderedDict
import json
Point = namedtuple('Point', ['x', 'y'])
Rect = namedtuple('Rect', ['ll', 'ur'])
Port = namedtuple('Port', ['port_nm', 'layer', 'rect'])
Blockage = namedtuple('Blockage', ['layer', 'rect'])
Terminal = namedtuple('Terminal', ['net_nm', 'layer'])
class Placement:
def __init__( self):
self.die = None
self.block_placement = OrderedDict()
self.net_wire_lengths = []
def __repr__( self):
return 'Placement(' + str(self.die) + "," + str(self.block_placement) + "," + str(self.net_wire_lengths) + ')'
def semantic( self):
assert self.die is not None
assert self.die.ll.x <= self.die.ur.x
assert self.die.ll.y <= self.die.ur.y
def parse( self, fp):
p_comment = re.compile( r'^#.*$')
p_blank = re.compile( r'^\s*$')
p_die = re.compile( r'^DIE\s*'
r'{\s*(-?\d+)\s*,\s*(-?\d+)\s*}'
r'\s*'
r'{\s*(-?\d+)\s*,\s*(-?\d+)\s*}'
r'\s*$')
p_triple = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s*$')
p_quadruple = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*$')
for line in fp:
line = line.rstrip( '\n')
m = p_comment.match(line)
if m: continue
m = p_blank.match(line)
if m: continue
m = p_die.match(line)
if m:
self.die = Rect( Point( int(m.groups()[0]), int(m.groups()[1])), Point( int(m.groups()[2]), int(m.groups()[3])))
continue
m = p_triple.match(line)
if m:
self.net_wire_lengths.append( ( m.groups()[0], Point( int(m.groups()[1]), int(m.groups()[2]))))
continue
m = p_quadruple.match(line)
if m:
self.block_placement[m.groups()[0]] = ( m.groups()[0], Point( int(m.groups()[1]), int(m.groups()[2])), m.groups()[3])
continue
assert False, line
class Constraint:
def __init__( self):
pass
class SymmNet(Constraint):
def __init__( self):
pass
def __repr__( self):
return "SymmNet(" + str( self.lst0) + "," + str( self.lst1) + ")"
def semantic( self):
assert len(self.lst0) >= 2
assert len(self.lst1) >= 2
class CritNet(Constraint):
def __init__( self):
pass
def __repr__( self):
return "CritNet(" + self.net_nm + "," + self.level + ")"
def semantic( self):
assert self.level in ['mid','min']
class ShieldNet(Constraint):
def __init__( self):
pass
def __repr__( self):
return "ShieldNet(" + ")"
def semantic( self):
pass
class MatchBlock(Constraint):
def __init__( self):
pass
def __repr__( self):
return "MatchBlock(" + ")"
def semantic( self):
pass
class Constraints:
def __init__( self):
self.constraints = []
def __repr__( self):
return ','.join( [ str(x) for x in self.constraints])
def semantic( self):
for c in self.constraints:
c.semantic()
def parse( self, fp):
p_comment = re.compile( r'^#.*$')
p_blank = re.compile( r'^\s*$')
p_constraint = re.compile( r'^(SymmNet|CritNet|ShieldNet|MatchBlock)'
r'\s*\('
r'(.*)'
r'\)\s*$')
p_bracecommasep = re.compile( r'^{(.+)}\s*,\s*{(.+)}$')
p_commasep = re.compile( r'^(\S+)\s*,\s*(\S+)$')
p_pin = re.compile( r'^(.+)/(.+)$')
def toLst( s):
lst = s.split(',')
assert len(lst) >= 2, lst
result = lst[0:1]
for e in lst[1:]:
m = p_pin.match( e)
if m:
block_nm = m.groups()[0]
formal_nm = m.groups()[1]
result.append( ( block_nm, formal_nm))
continue
result.append( ( 'terminal', e))
return result
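# Illustrative behavior of toLst (hypothetical names):
#   toLst("net1,block1/A,K") -> ['net1', ('block1', 'A'), ('terminal', 'K')]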
for line in fp:
line = line.rstrip( '\n')
m = p_comment.match(line)
if m: continue
m = p_blank.match(line)
if m: continue
m = p_constraint.match(line)
if m:
tag = m.groups()[0]
rest = m.groups()[1].strip( ' ')
if tag == 'SymmNet':
c = SymmNet()
mm = p_bracecommasep.match( rest)
assert mm, rest
c.lst0 = toLst( mm.groups()[0])
c.lst1 = toLst( mm.groups()[1])
elif tag == 'CritNet':
c = CritNet()
mm = p_commasep.match( rest)
assert mm, rest
c.net_nm = mm.groups()[0]
c.level = mm.groups()[1]
elif tag == 'ShieldNet':
c = ShieldNet()
pass
elif tag == 'MatchBlock':
c = MatchBlock()
pass
else:
assert False
self.constraints.append( c)
continue
assert False, line
class Net:
def __init__( self):
self.net_nm = None
self.pin_count = None
self.pin_lst = []
def __repr__( self):
return "Net(" + self.net_nm + "," + str(self.pin_count) + "," + str(self.pin_lst) + ")"
class Netlist:
def __init__( self):
self.params = OrderedDict()
self.nets = OrderedDict()
self.pins = {}
def __repr__( self):
return "Netlist(" + str(self.params) + "," + str(self.nets) + ")"
def semantic( self):
assert self.params['NumNets'] == len(self.nets)
nPins = sum( [ len([ x for x in v.pin_lst if x[0] != 'terminal']) for (k,v) in self.nets.items()])
assert self.params['NumPins'] == nPins, (self.params['NumPins'], nPins)
for (k,v) in self.nets.items():
assert v.pin_count is not None, k
assert v.pin_count == len(v.pin_lst), (k, v.pin_count, len(v.pin_lst))
for pin in v.pin_lst:
assert pin not in self.pins, (k, pin, 'already in', self.pins)
self.pins[pin] = k
def parse( self, fp):
p_comment = re.compile( r'^#.*$')
p_blank = re.compile( r'^\s*$')
p_assignments = re.compile( r'^(NumNets|NumPins)\s*:\s*(\d+)\s*$')
p_net_and_count = re.compile( r'^(\S+)\s*:\s*(\d+)\s*$')
p_pairs = re.compile( r'^(\S+)\s+(\S+)\s*$')
net = None
for line in fp:
line = line.rstrip( '\n')
m = p_comment.match(line)
if m: continue
m = p_blank.match(line)
if m: continue
m = p_assignments.match(line)
if m:
self.params[m.groups()[0]] = int(m.groups()[1])
continue
m = p_net_and_count.match(line)
if m:
net = Net()
net.net_nm = m.groups()[0]
net.pin_count = int(m.groups()[1])
self.nets[net.net_nm] = net
continue
m = p_pairs.match(line)
if m:
net.pin_lst.append( (m.groups()[0], m.groups()[1]))
continue
assert False, line
class Block:
def __init__( self, nm):
self.nm = nm
self.rect = None
self.port_count = None
self.port_lst = []
self.blockage_lst = []
def __repr__( self):
return 'Block(' + self.nm + "," + str(self.port_count) + "," + str(self.port_lst) + ')'
def semantic( self):
assert self.port_count is not None
assert self.port_count == len(self.port_lst), (self.port_count, len(self.port_lst))
class Blocks:
def __init__( self):
self.params = {}
self.block_lst = OrderedDict()
self.terminal_lst = []
def __repr__( self):
return 'Blocks(' + str(self.params) + "," + str(self.block_lst) + "," + str(self.terminal_lst) + ')'
def semantic( self):
assert self.params['NumSoftRectangularBlocks'] == 0
assert self.params['NumHardRectilinearBlocks'] == len(self.block_lst)
assert self.params['NumTerminals'] == len(self.terminal_lst)
for (k,v) in self.block_lst.items():
v.semantic()
def parse( self, fp):
p_comment = re.compile( r'^#.*$')
p_blank = re.compile( r'^\s*$')
p_assignments = re.compile( r'^(NumSoftRectangularBlocks|NumHardRectilinearBlocks|NumTerminals)\s*:\s*(\d+)\s*$')
p_outline = re.compile( r'^(\S+)\s+(hardrectilinear)\s+'
r'(\d+)\s+'
r'((\(\s*(-?\d+)\s*\,\s*(-?\d+)\s*\)\s*)*)'
r'$')
p_block = re.compile( r'^BLOCK\s+(\S+)\s*:\s*(\d+)\s*$')
p_port = re.compile( r'^(\S+)\s+(\S+)\s+'
r'((\(\s*(-?\d+)\s*\,\s*(-?\d+)\s*\)\s*){4})'
r'$')
p_terminal = re.compile( r'^(\S+)\s+(\S+)\s+terminal\s*$')
p_pair = re.compile( r'^\s*\(\s*(-?\d+)\s*,\s*(-?\d+)\s*\)(.*)$')
def parse_pair_list( s):
result = []
rest = s
while True:
m = p_blank.match( rest)
if m: return result
m = p_pair.match( rest)
assert m, rest
x = int(m.groups()[0])
y = int(m.groups()[1])
rest = m.groups()[2]
result.append( Point(x=x,y=y))
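# Illustrative behavior of parse_pair_list (values taken from the test data below):
#   parse_pair_list("(0, 0) (0, 789) (648, 789) (648, 0)")
#   -> [Point(x=0, y=0), Point(x=0, y=789), Point(x=648, y=789), Point(x=648, y=0)]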
block = None
if True:
for line in fp:
line = line.rstrip('\n')
m = p_comment.match(line)
if m: continue
m = p_blank.match(line)
if m: continue
m = p_assignments.match(line)
if m:
self.params[m.groups()[0]] = int(m.groups()[1])
continue
m = p_outline.match(line)
if m:
block_nm = m.groups()[0]
block = Block( block_nm)
type_nm = m.groups()[1]
point_count = int(m.groups()[2])
point_lst = parse_pair_list( m.groups()[3])
assert point_count == len(point_lst)
assert point_count == 4
rect = Rect( ll=point_lst[0], ur=point_lst[2])
for p in point_lst:
assert rect.ll.x <= p.x
assert rect.ll.y <= p.y
assert rect.ur.x >= p.x
assert rect.ur.y >= p.y
block.rect = rect
self.block_lst[block_nm] = block
block = None
continue
m = p_block.match(line)
if m:
block_nm = m.groups()[0]
assert block_nm in self.block_lst
block = self.block_lst[block_nm]
block.port_count = int(m.groups()[1])
continue
m = p_port.match(line)
if m:
port_nm = m.groups()[0]
layer = m.groups()[1]
point_lst = parse_pair_list( m.groups()[2])
assert len(point_lst) == 4
rect = Rect( ll=point_lst[0], ur=point_lst[2])
for p in point_lst:
pass
# assert rect.ll.x <= p.x, (p, 'should be inside', rect)
# assert rect.ll.y <= p.y, (p, 'should be inside', rect)
# assert rect.ur.x >= p.x, (p, 'should be inside', rect)
# assert rect.ur.y >= p.y, (p, 'should be inside', rect)
if port_nm == 'INT':
blockage = Blockage( layer, rect)
block.blockage_lst.append( blockage)
else:
port = Port( port_nm, layer, rect)
block.port_lst.append( port)
continue
m = p_terminal.match(line)
if m:
net_nm = m.groups()[0]
layer = m.groups()[1]
self.terminal_lst.append( Terminal( net_nm, layer))
continue
assert False, line
import io
def test_n3():
s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0) (0, 842) (648, 842) (648, 0)
L1_MM3_MM2 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
BLOCK L1_MM1_MM0 : 5
G1 M1 (108, 684) (108, 842) (148, 842) (148, 684)
G2 M1 (504, 684) (504, 836) (544, 836) (544, 684)
D1 M1 (88, 4) (88, 146) (128, 146) (128, 4)
S M1 (236, 796) (236, 836) (412, 836) (412, 796)
D2 M1 (520, 0) (520, 146) (560, 146) (560, 0)
INT M1 (196, 612) (196, 836) (236, 836) (196, 836)
INT M1 (412, 612) (412, 836) (452, 836) (412, 836)
BLOCK L1_MM3_MM2 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (236, 749) (236, 789) (412, 789) (412, 749)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (236, 619)
INT M1 (412, 619) (412, 789) (452, 789) (452, 619)
INT M1 (89, 39) (89, 148) (125, 148) (125, 39)
INT M1 (89, 39) (89, 75) (471, 75) (471, 39)
gnd! M1 terminal
vdd! M1 terminal
net2 M1 terminal
net14 M1 terminal
net17 M1 terminal
"""
with io.StringIO(s) as fp:
blocks = Blocks()
blocks.parse( fp)
blocks.semantic()
def test_negative():
s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0)
import sys
sys.path.append('../')
import argparse
import datetime
import json
import os
import keras
import keras.backend as K
import tensorflow as tf
import numpy as np
import pandas as pd
import csv
import globals
from globals import BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, \
NUM_CHANNELS, NUM_CLASSES, EPOCHS, INPUT_SHAPE, \
K_NEGATIVE_SAMPLE_RATIO_WEIGHT, LEARNING_RATE, \
IMG_CAM_WIDTH, IMG_CAM_HEIGHT, NUM_CAM_CHANNELS, INPUT_SHAPE_CAM
from keras.callbacks import ModelCheckpoint, TensorBoard, Callback, ReduceLROnPlateau
from keras.layers.core import Dense, Flatten, Dropout
from keras.layers import Input, concatenate, Reshape, BatchNormalization, Activation, Lambda, Concatenate
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import Add
from common.camera_model import CameraModel
from process.globals import CAM_IMG_BOTTOM, CAM_IMG_TOP
from loader import get_data_and_ground_truth, data_generator_train, \
data_number_of_batches_per_epoch, filter_camera_data_and_gt, \
generate_index_list, file_prefix_for_timestamp, \
load_data
from model import build_model, load_model
from pretrain import calculate_population_weights
from common import pr_curve_plotter
from common.csv_utils import foreach_dirset
from train import LossHistory
def load_fcn(model_file, weights_file, lockLidarModel, lockCameraModel, trainable):
with open(model_file, 'r') as jfile:
print('Loading weights file {}'.format(weights_file))
print("reading existing model and weights")
model = keras.models.model_from_json(json.loads(jfile.read()))
model.load_weights(weights_file)
for layer in model.layers:
layer.trainable = trainable
if (lockCameraModel and "cameraNet" in layer.name) or (lockLidarModel and "lidarNet" in layer.name):
layer.trainable = False
print("%s trainable mode changed to False" % layer.name)
model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss="mean_squared_error", metrics=['mae'])
print(model.summary())
return model
def load_gt(indicies, centroid_rotation, gt, obs_size):
batch_index = 0
for ind in indicies:
gt[batch_index,0] = centroid_rotation[0][ind] #tx
gt[batch_index,1] = centroid_rotation[1][ind] #ty
gt[batch_index,2] = centroid_rotation[2][ind] #tz
gt[batch_index,3] = centroid_rotation[5][ind] #rz
gt[batch_index,4] = obs_size[0][ind]
gt[batch_index,5] = obs_size[1][ind]
gt[batch_index,6] = obs_size[2][ind]
batch_index += 1
def load_radar_data(indicies, radar_ranges_angles, radar_data):
batch_index = 0
for ind in indicies:
radar_ranges_angles[batch_index,0] = radar_data[0][ind]
radar_ranges_angles[batch_index,1] = radar_data[1][ind]
batch_index +=1
def data_generator_FCN(obs_centroids, obs_size,
pickle_dir_and_prefix_cam, pickle_dir_and_prefix_lidar,
batch_size, radar_data, cache):
tx = obs_centroids[0]
ty = obs_centroids[1]
tz = obs_centroids[2]
rx = obs_centroids[3]
ry = obs_centroids[4]
rz = obs_centroids[5]
obsl = obs_size[0]
obsw = obs_size[1]
obsh = obs_size[2]
cam_images = np.ndarray(shape=(batch_size, globals.IMG_CAM_HEIGHT,
globals.IMG_CAM_WIDTH, globals.NUM_CAM_CHANNELS), dtype=float)
lidar_images = np.ndarray(shape=(batch_size, globals.IMG_HEIGHT,
globals.IMG_WIDTH, globals.NUM_CHANNELS), dtype=float)
centroid_rotation_size = np.ndarray(shape=(batch_size, 7), dtype=float)
centroid = np.ndarray(shape=(batch_size, 3), dtype=float)
rz = np.ndarray(shape=(batch_size,1), dtype=float)
radar_ranges_angles = np.ndarray(shape=(batch_size, 2), dtype=float)
num_batches = data_number_of_batches_per_epoch(pickle_dir_and_prefix_cam, batch_size)
indicies_list = np.arange(len(tx))
is_cache_avail = False
# if cache is not None:
# is_cache_avail = cache['cam_images'] is not None and cache['centroid'] is not None
# if not is_cache_avail:
# cache['cam_images'] = np.ndarray(shape=(len(pickle_dir_and_prefix_cam), globals.IMG_CAM_HEIGHT,
# globals.IMG_CAM_WIDTH, globals.NUM_CAM_CHANNELS), dtype=float)
# cache['lidar_images'] = np.ndarray(shape=(len(pickle_dir_and_prefix_lidar), globals.IMG_HEIGHT,
# globals.IMG_WIDTH, globals.NUM_CHANNELS), dtype=float)
# cache['radar_data'] = np.ndarray(shape=(len(radar_data[0]), 2), dtype=float)
#
# cache['centroid'] = np.ndarray(shape=(len(obs_centroids[0]), 3), dtype=float)
# cache['rz'] = np.ndarray(shape=(len(obs_centroids[0]),1), dtype=float)
while 1:
if cache is not None:
is_cache_avail = cache['cam_images'] is not None and cache['centroid'] is not None
if not is_cache_avail:
cache['cam_images'] = np.ndarray(shape=(len(pickle_dir_and_prefix_cam), globals.IMG_CAM_HEIGHT,
globals.IMG_CAM_WIDTH, globals.NUM_CAM_CHANNELS), dtype=float)
cache['lidar_images'] = np.ndarray(shape=(len(pickle_dir_and_prefix_lidar), globals.IMG_HEIGHT,
globals.IMG_WIDTH, globals.NUM_CHANNELS), dtype=float)
cache['radar_data'] = np.ndarray(shape=(len(radar_data[0]), 2), dtype=float)
cache['centroid'] = np.ndarray(shape=(len(obs_centroids[0]), 3), dtype=float)
cache['rz'] = np.ndarray(shape=(len(obs_centroids[0]),1), dtype=float)
indicies = generate_index_list(indicies_list, True, num_batches, batch_size)
for batch in range(num_batches):
batch_indicies = indicies[batch * batch_size:batch * batch_size + batch_size]
if not is_cache_avail:
load_data(batch_indicies, lidar_images, pickle_dir_and_prefix_lidar, "lidar", globals.NUM_CHANNELS)
load_data(batch_indicies, cam_images, pickle_dir_and_prefix_cam, "camera", globals.NUM_CAM_CHANNELS)
load_radar_data(batch_indicies, radar_ranges_angles, radar_data)
load_gt(batch_indicies, obs_centroids, centroid_rotation_size, obs_size)
np.copyto(centroid, centroid_rotation_size[:,0:3])
np.copyto(rz, centroid_rotation_size[:,3:4])
if cache is not None:
# save to cache
i = 0
for ind in batch_indicies:
np.copyto(cache['cam_images'][ind], cam_images[i])
np.copyto(cache['lidar_images'][ind], lidar_images[i])
np.copyto(cache['radar_data'][ind], radar_ranges_angles[i])
np.copyto(cache['centroid'][ind], centroid[i])
np.copyto(cache['rz'][ind], rz[i])
i += 1
else:
# copy from cache
i = 0
for ind in batch_indicies:
np.copyto(cam_images[i], cache['cam_images'][ind])
np.copyto(lidar_images[i], cache['lidar_images'][ind])
np.copyto(radar_ranges_angles[i], cache['radar_data'][ind])
np.copyto(centroid[i], cache['centroid'][ind])
np.copyto(rz[i], cache['rz'][ind])
i += 1
yield ([cam_images, lidar_images, radar_ranges_angles], [centroid, rz])
def get_data_and_ground_truth_matching_lidar_cam_frames(csv_sources, parent_dir):
txl_cam = []
tyl_cam = []
tzl_cam = []
rxl_cam = []
ryl_cam = []
rzl_cam = []
timestamps_cam = []
obsl_cam = []
obsw_cam = []
obsh_cam = []
pickle_dir_and_prefix_cam = []
pickle_dir_and_prefix_lidar = []
radar_range = []
radar_angle = []
def process(dirset):
lidar_truth_fname = dirset.dir+"/obs_poses_interp_transform.csv"
cam_truth_fname = dirset.dir+"/obs_poses_camera.csv"
radar_data_fname = dirset.dir+"/radar/radar_tracks.csv"
df_lidar_truths = pd.read_csv(lidar_truth_fname)
lidar_truths_list = df_lidar_truths['timestamp'].tolist()
df_radar_data = pd.read_csv(radar_data_fname)
#print lidar_rows[:,'timestamp']
def nearest_lidar_timestamp(cam_ts):
x = min(lidar_truths_list, key=lambda x:abs(x-cam_ts))
return x
def nearest_radar_timestamp_data(cam_ts):
return df_radar_data.ix[(df_radar_data['timestamp']-cam_ts).abs().argsort()[0]]
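# e.g. with lidar_truths_list == [100, 250, 400], nearest_lidar_timestamp(260) returns 250,
# the entry with the smallest absolute timestamp difference (hypothetical values)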
with open(cam_truth_fname) as csvfile_2:
readCSV_2 = csv.DictReader(csvfile_2, delimiter=',')
for row2 in readCSV_2:
ts = row2['timestamp']
tx = row2['tx']
ty = row2['ty']
tz = row2['tz']
rx = row2['rx']
ry = row2['ry']
rz = row2['rz']
pickle_dir_prefix = file_prefix_for_timestamp(dirset.dir, "camera", ts)
pickle_dir_and_prefix_cam.append(pickle_dir_prefix)
txl_cam.append(float(tx))
tyl_cam.append(float(ty))
tzl_cam.append(float(tz))
rxl_cam.append(float(rx))
ryl_cam.append(float(ry))
rzl_cam.append(float(rz))
timestamps_cam.append(ts)
obsl_cam.append(float(dirset.mdr['l']))
obsw_cam.append(float(dirset.mdr['w']))
obsh_cam.append(float(dirset.mdr['h']))
lidar_ts = nearest_lidar_timestamp(int(ts))
pickle_dir_prefix = file_prefix_for_timestamp(dirset.dir, "lidar", str(lidar_ts))
pickle_dir_and_prefix_lidar.append(pickle_dir_prefix)
radar_data = nearest_radar_timestamp_data(int(ts))
radar_range.append(float(radar_data['range']))
radar_angle.append(float(radar_data['angle']))
foreach_dirset(csv_sources, parent_dir, process)
obs_centroid = [txl_cam, tyl_cam, tzl_cam, rxl_cam, ryl_cam, rzl_cam, timestamps_cam]
obs_size = [obsl_cam, obsw_cam, obsh_cam]
radar_data = [radar_range, radar_angle]
return obs_centroid, pickle_dir_and_prefix_cam, obs_size, pickle_dir_and_prefix_lidar, radar_data
def build_FCN(input_layer, output_layer, net_name):
# cam_net_out is too big. apply max_pooling
if net_name=="cam2fcn":
output_layer = MaxPooling2D(pool_size=(4, 1), strides=None, padding='valid', data_format=None)(output_layer)
flatten_out = Flatten()(output_layer)
dropout_1 = Dropout(0.2)(flatten_out)
dense_1 = Dense(96, activation='relu', name='dense1_'+net_name,
kernel_initializer='random_uniform', bias_initializer='zeros')(dropout_1)
dropout_2 = Dropout(0.2)(dense_1)
dense_2 = Dense(48, activation='relu', name='dense2_'+net_name,
kernel_initializer='random_uniform', bias_initializer='zeros')(dropout_2)
return dense_2
def build_FCN_cam_lidar(cam_inp, lidar_inp, cam_net_out, lidar_net_out,\
lockLidarModel, lockCameraModel):
cam_net_out = build_FCN(cam_inp, cam_net_out, "cam2fcn")
lidar_net_out = build_FCN(lidar_inp, lidar_net_out, "ldr2fcn")
radar_inp = Input(shape=(2,), name='radar')
dense = concat_normalized = concat_input = concatenate([cam_net_out, lidar_net_out, radar_inp])
#concat_normalized = BatchNormalization(name='normalize', axis=-1)(concat_input)
#dense = Dense(3, activation='relu', name='fcn.dense',
# kernel_initializer='random_uniform', bias_initializer='zeros')(concat_normalized)
# output for centroid
dense_1_1 = Dense(3, activation='elu', name='dense1_1',
kernel_initializer='random_uniform', bias_initializer='zeros')(dense)
dense_1_2 = Dense(3, activation='elu', name='dense1_2',
kernel_initializer='random_uniform', bias_initializer='zeros')(dense)
d_1 = Dense(3, activation='linear', name='d1')(concatenate([dense_1_1, dense_1_2]))
# output for rotation
dense_2_1 = Dense(1, activation='elu', name='dense2_1',
kernel_initializer='random_uniform', bias_initializer='zeros')(dense)
dense_2_2 = Dense(1, activation='elu', name='dense2_2',
kernel_initializer='random_uniform', bias_initializer='zeros')(dense)
d_2 = Dense(1, activation='linear', name='d2')(concatenate([dense_2_1, dense_2_2]))
model = Model(inputs=[cam_inp, lidar_inp, radar_inp], outputs=[d_1, d_2])
for layer in model.layers:
layer.trainable = True
if (lockCameraModel and "cameraNet" in layer.name) or (lockLidarModel and "lidarNet" in layer.name):
layer.trainable = False
print("%s trainable mode changed to False" % layer.name)
model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss="mean_squared_error", metrics=['mae'])
print(model.summary())
return model
def main():
parser = argparse.ArgumentParser(description='FCN trainer for radar/camera/lidar')
parser.add_argument("--train_file", type=str, default="../data/train_folders.csv",
help="list of data folders for training")
parser.add_argument("--val_file", type=str, default="../data/validation_folders.csv",
help="list of data folders for validation")
parser.add_argument("--dir_prefix", type=str, default="", help="absolute path to folders")
parser.add_argument('--camera_model', type=str, default="", help='Model Filename')
parser.add_argument('--camera_weights', type=str, default="", help='Weights Filename')
parser.add_argument('--lidar_model', type=str, default="", help='Model Filename')
parser.add_argument('--lidar_weights', type=str, default="", help='Weights Filename')
parser.add_argument('--fcn_model', type=str, default="", help='Model Filename')
parser.add_argument('--fcn_weights', type=str, default="", help='Weights Filename')
parser.add_argument('--outdir', type=str, default="./", help='output directory')
parser.add_argument('--cache', type=str, default=None, help='Cache data')
# If an fcn_model file is given, the weights of the whole network come from the fcn model,
# even if camera/lidar model file names are also given. If you give camera/lidar model names
# along with the fcn model, the camera/lidar parts will not be trained, only the top part.
# To create an fcn model from previously trained camera/lidar models, do not pass an fcn model
# file name. The newly created fcn model will then include the camera/lidar weights and can be
# used to train only the top part the next time you run training; see the example invocations below.
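# Example invocations (script name and file paths are illustrative, not from the original project):
#   python train_fcn.py --camera_model cam.json --camera_weights cam.h5 \
#                       --lidar_model lidar.json --lidar_weights lidar.h5
#     -> builds a new FCN head on top of the pretrained (locked) camera and lidar nets
#   python train_fcn.py --fcn_model fcn.json --fcn_weights fcn.h5 \
#                       --camera_model cam.json --lidar_model lidar.json
#     -> loads all weights from the fcn model and trains only the top part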
args = parser.parse_args()
train_file = args.train_file
validation_file = args.val_file
outdir = args.outdir
dir_prefix = args.dir_prefix
cache_train, cache_val = None, None
if args.cache is not None:
cache_train = {'cam_images': None, 'lidar_images': None, 'radar_data': None, 'centroid': None, 'rz': None}
cache_val = {'cam_images': None, 'lidar_images': None, 'radar_data': None, 'centroid': None, 'rz': None}
lockCameraModel = None
camera_net = None
if args.camera_model != "":
lockCameraModel = True
if args.fcn_model == "":
weightsFile = args.camera_model.replace('json', 'h5')
if args.camera_weights != "":
weightsFile = args.camera_weights
camera_net = load_model(args.camera_model, weightsFile,
INPUT_SHAPE_CAM, NUM_CLASSES, trainable=False,
layer_name_ext="cameraNet")
else:
lockCameraModel = False
if args.fcn_model == "":
camera_net = build_model(
INPUT_SHAPE_CAM, NUM_CLASSES, trainable=True,
data_source="camera", layer_name_ext="cameraNet")
if camera_net is not None:
cam_inp_layer = camera_net.input
cam_out_layer = camera_net.get_layer("deconv6acameraNet").output
lockLidarModel = None
lidar_net = None
if args.lidar_model != "":
lockLidarModel = True
if args.fcn_model == "":
weightsFile = args.lidar_model.replace('json', 'h5')
if args.lidar_weights != "":
weightsFile = args.lidar_weights
lidar_net = load_model(args.lidar_model, weightsFile,
INPUT_SHAPE, NUM_CLASSES, trainable=False,
layer_name_ext="lidarNet")
else:
lockLidarModel = False
if args.fcn_model == "":
lidar_net = build_model(
INPUT_SHAPE, NUM_CLASSES, trainable=True,
data_source="lidar", layer_name_ext="lidarNet")
if lidar_net is not None:
lidar_inp_layer = lidar_net.input
lidar_out_layer = lidar_net.get_layer("deconv6alidarNet").output
if args.fcn_model != "":
weightsFile = args.fcn_model.replace('json', 'h5')
if args.fcn_weights != "":
weightsFile = args.fcn_weights
cam_lidar_radar_net = load_fcn(args.fcn_model,
# the possibilities:
# {mod, func, method, static_method, cls_method}
# -> {func, method, static_method, cls_method}
#
###############################################################################
# func 0
###############################################################################
def test_func_get_caller_info_0(capsys: pytest.CaptureFixture[str]) -> None:
"""Module level function 0 to test get_caller_info.
Args:
capsys: Pytest fixture that captures output
"""
exp_stack: Deque[CallerInfo] = deque()
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='',
func_name='test_func_get_caller_info_0',
line_num=1071)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=1084, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=1091, add=0)
call_seq = get_formatted_call_sequence(depth=1)
assert call_seq == get_exp_seq(exp_stack=exp_stack)
# test diag_msg
update_stack(exp_stack=exp_stack, line_num=1098, add=0)
before_time = datetime.now()
diag_msg('message 0', 0, depth=1)
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(depth_arg=1,
msg_arg=['message 0', 0])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
# call module level function
update_stack(exp_stack=exp_stack, line_num=1111, add=0)
func_get_caller_info_1(exp_stack=exp_stack, capsys=capsys)
# call method
cls_get_caller_info1 = ClassGetCallerInfo1()
update_stack(exp_stack=exp_stack, line_num=1116, add=0)
cls_get_caller_info1.get_caller_info_m1(exp_stack=exp_stack, capsys=capsys)
# call static method
update_stack(exp_stack=exp_stack, line_num=1120, add=0)
cls_get_caller_info1.get_caller_info_s1(exp_stack=exp_stack, capsys=capsys)
# call class method
update_stack(exp_stack=exp_stack, line_num=1124, add=0)
ClassGetCallerInfo1.get_caller_info_c1(exp_stack=exp_stack, capsys=capsys)
# call overloaded base class method
update_stack(exp_stack=exp_stack, line_num=1128, add=1)
cls_get_caller_info1.get_caller_info_m1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class static method
update_stack(exp_stack=exp_stack, line_num=1133, add=1)
cls_get_caller_info1.get_caller_info_s1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class class method
update_stack(exp_stack=exp_stack, line_num=1138, add=1)
ClassGetCallerInfo1.get_caller_info_c1bo(exp_stack=exp_stack,
capsys=capsys)
# call subclass method
cls_get_caller_info1s = ClassGetCallerInfo1S()
update_stack(exp_stack=exp_stack, line_num=1144, add=1)
cls_get_caller_info1s.get_caller_info_m1s(exp_stack=exp_stack,
capsys=capsys)
# call subclass static method
update_stack(exp_stack=exp_stack, line_num=1149, add=1)
cls_get_caller_info1s.get_caller_info_s1s(exp_stack=exp_stack,
capsys=capsys)
# call subclass class method
update_stack(exp_stack=exp_stack, line_num=1154, add=1)
ClassGetCallerInfo1S.get_caller_info_c1s(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass method
update_stack(exp_stack=exp_stack, line_num=1159, add=1)
cls_get_caller_info1s.get_caller_info_m1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass static method
update_stack(exp_stack=exp_stack, line_num=1164, add=1)
cls_get_caller_info1s.get_caller_info_s1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass class method
update_stack(exp_stack=exp_stack, line_num=1169, add=1)
ClassGetCallerInfo1S.get_caller_info_c1bo(exp_stack=exp_stack,
capsys=capsys)
# call base method from subclass method
update_stack(exp_stack=exp_stack, line_num=1174, add=1)
cls_get_caller_info1s.get_caller_info_m1sb(exp_stack=exp_stack,
capsys=capsys)
# call base static method from subclass static method
update_stack(exp_stack=exp_stack, line_num=1179, add=1)
cls_get_caller_info1s.get_caller_info_s1sb(exp_stack=exp_stack,
capsys=capsys)
# call base class method from subclass class method
update_stack(exp_stack=exp_stack, line_num=1184, add=1)
ClassGetCallerInfo1S.get_caller_info_c1sb(exp_stack=exp_stack,
capsys=capsys)
exp_stack.pop()
###############################################################################
# func 1
###############################################################################
def func_get_caller_info_1(exp_stack: Deque[CallerInfo],
capsys: Optional[Any]) -> None:
"""Module level function 1 to test get_caller_info.
Args:
exp_stack: The expected call stack
capsys: Pytest fixture that captures output
"""
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='',
func_name='func_get_caller_info_1',
line_num=1197)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=1211, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=1218, add=0)
call_seq = get_formatted_call_sequence(depth=len(exp_stack))
assert call_seq == get_exp_seq(exp_stack=exp_stack)
# test diag_msg
if capsys: # if capsys, test diag_msg
update_stack(exp_stack=exp_stack, line_num=1226, add=0)
before_time = datetime.now()
diag_msg('message 1', 1, depth=len(exp_stack))
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(depth_arg=len(exp_stack),
msg_arg=['message 1', 1])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
# call module level function
update_stack(exp_stack=exp_stack, line_num=1239, add=0)
func_get_caller_info_2(exp_stack=exp_stack, capsys=capsys)
# call method
cls_get_caller_info2 = ClassGetCallerInfo2()
update_stack(exp_stack=exp_stack, line_num=1244, add=0)
cls_get_caller_info2.get_caller_info_m2(exp_stack=exp_stack, capsys=capsys)
# call static method
update_stack(exp_stack=exp_stack, line_num=1248, add=0)
cls_get_caller_info2.get_caller_info_s2(exp_stack=exp_stack, capsys=capsys)
# call class method
update_stack(exp_stack=exp_stack, line_num=1252, add=0)
ClassGetCallerInfo2.get_caller_info_c2(exp_stack=exp_stack, capsys=capsys)
# call overloaded base class method
update_stack(exp_stack=exp_stack, line_num=1256, add=1)
cls_get_caller_info2.get_caller_info_m2bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class static method
update_stack(exp_stack=exp_stack, line_num=1261, add=1)
cls_get_caller_info2.get_caller_info_s2bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class class method
update_stack(exp_stack=exp_stack, line_num=1266, add=1)
ClassGetCallerInfo2.get_caller_info_c2bo(exp_stack=exp_stack,
capsys=capsys)
# call subclass method
cls_get_caller_info2s = ClassGetCallerInfo2S()
update_stack(exp_stack=exp_stack, line_num=1272, add=1)
cls_get_caller_info2s.get_caller_info_m2s(exp_stack=exp_stack,
capsys=capsys)
# call subclass static method
update_stack(exp_stack=exp_stack, line_num=1277, add=1)
cls_get_caller_info2s.get_caller_info_s2s(exp_stack=exp_stack,
capsys=capsys)
# call subclass class method
update_stack(exp_stack=exp_stack, line_num=1282, add=1)
ClassGetCallerInfo2S.get_caller_info_c2s(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass method
update_stack(exp_stack=exp_stack, line_num=1287, add=1)
cls_get_caller_info2s.get_caller_info_m2bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass static method
update_stack(exp_stack=exp_stack, line_num=1292, add=1)
cls_get_caller_info2s.get_caller_info_s2bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass class method
update_stack(exp_stack=exp_stack, line_num=1297, add=1)
ClassGetCallerInfo2S.get_caller_info_c2bo(exp_stack=exp_stack,
capsys=capsys)
# call base method from subclass method
update_stack(exp_stack=exp_stack, line_num=1302, add=1)
cls_get_caller_info2s.get_caller_info_m2sb(exp_stack=exp_stack,
capsys=capsys)
# call base static method from subclass static method
update_stack(exp_stack=exp_stack, line_num=1307, add=1)
cls_get_caller_info2s.get_caller_info_s2sb(exp_stack=exp_stack,
capsys=capsys)
# call base class method from subclass class method
update_stack(exp_stack=exp_stack, line_num=1312, add=1)
ClassGetCallerInfo2S.get_caller_info_c2sb(exp_stack=exp_stack,
capsys=capsys)
exp_stack.pop()
###############################################################################
# func 2
###############################################################################
def func_get_caller_info_2(exp_stack: Deque[CallerInfo],
capsys: Optional[Any]) -> None:
"""Module level function 1 to test get_caller_info.
Args:
exp_stack: The expected call stack
capsys: Pytest fixture that captures output
"""
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='',
func_name='func_get_caller_info_2',
line_num=1324)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=1339, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=1346, add=0)
call_seq = get_formatted_call_sequence(depth=len(exp_stack))
assert call_seq == get_exp_seq(exp_stack=exp_stack)
# test diag_msg
if capsys: # if capsys, test diag_msg
update_stack(exp_stack=exp_stack, line_num=1354, add=0)
before_time = datetime.now()
diag_msg('message 2', 2, depth=len(exp_stack))
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(depth_arg=len(exp_stack),
msg_arg=['message 2', 2])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
# call module level function
update_stack(exp_stack=exp_stack, line_num=1367, add=0)
func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys)
# call method
cls_get_caller_info3 = ClassGetCallerInfo3()
update_stack(exp_stack=exp_stack, line_num=1372, add=0)
cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack, capsys=capsys)
# call static method
update_stack(exp_stack=exp_stack, line_num=1376, add=0)
cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack, capsys=capsys)
# call class method
update_stack(exp_stack=exp_stack, line_num=1380, add=0)
ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack, capsys=capsys)
# call overloaded base class method
update_stack(exp_stack=exp_stack, line_num=1384, add=1)
cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class static method
update_stack(exp_stack=exp_stack, line_num=1389, add=1)
cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class class method
update_stack(exp_stack=exp_stack, line_num=1394, add=1)
ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack,
capsys=capsys)
# call subclass method
cls_get_caller_info3s = ClassGetCallerInfo3S()
update_stack(exp_stack=exp_stack, line_num=1400, add=1)
cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack,
capsys=capsys)
# call subclass static method
update_stack(exp_stack=exp_stack, line_num=1405, add=1)
cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack,
capsys=capsys)
# call subclass class method
update_stack(exp_stack=exp_stack, line_num=1410, add=1)
ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass method
update_stack(exp_stack=exp_stack, line_num=1415, add=1)
cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass static method
update_stack(exp_stack=exp_stack, line_num=1420, add=1)
cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass class method
update_stack(exp_stack=exp_stack, line_num=1425, add=1)
ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack,
capsys=capsys)
# call base method from subclass method
update_stack(exp_stack=exp_stack, line_num=1430, add=1)
cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack,
capsys=capsys)
# call base static method from subclass static method
update_stack(exp_stack=exp_stack, line_num=1435, add=1)
cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack,
capsys=capsys)
# call base class method from subclass class method
update_stack(exp_stack=exp_stack, line_num=1440, add=1)
ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack,
capsys=capsys)
exp_stack.pop()
###############################################################################
# func 3
###############################################################################
def func_get_caller_info_3(exp_stack: Deque[CallerInfo],
capsys: Optional[Any]) -> None:
"""Module level function 1 to test get_caller_info.
Args:
exp_stack: The expected call stack
capsys: Pytest fixture that captures output
"""
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='',
func_name='func_get_caller_info_3',
line_num=1451)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=1467, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=1474, add=0)
call_seq = get_formatted_call_sequence(depth=len(exp_stack))
assert call_seq == get_exp_seq(exp_stack=exp_stack)
# test diag_msg
if capsys: # if capsys, test diag_msg
update_stack(exp_stack=exp_stack, line_num=1482, add=0)
before_time = datetime.now()
diag_msg('message 2', 2, depth=len(exp_stack))
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(depth_arg=len(exp_stack),
msg_arg=['message 2', 2])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
exp_stack.pop()
###############################################################################
# Classes
###############################################################################
###############################################################################
# Class 0
###############################################################################
class TestClassGetCallerInfo0:
"""Class to get caller info 0."""
###########################################################################
# Class 0 Method 1
###########################################################################
def test_get_caller_info_m0(self,
capsys: pytest.CaptureFixture[str]) -> None:
"""Get caller info method 1.
Args:
capsys: Pytest fixture that captures output
"""
exp_stack: Deque[CallerInfo] = deque()
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='TestClassGetCallerInfo0',
func_name='test_get_caller_info_m0',
line_num=1509)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=1526, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=1533, add=0)
call_seq = get_formatted_call_sequence(depth=1)
assert call_seq == get_exp_seq(exp_stack=exp_stack)
# test diag_msg
update_stack(exp_stack=exp_stack, line_num=1540, add=0)
before_time = datetime.now()
diag_msg('message 1', 1, depth=1)
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(depth_arg=1,
msg_arg=['message 1', 1])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
# call module level function
update_stack(exp_stack=exp_stack, line_num=1553, add=0)
func_get_caller_info_1(exp_stack=exp_stack, capsys=capsys)
# call method
cls_get_caller_info1 = ClassGetCallerInfo1()
update_stack(exp_stack=exp_stack, line_num=1558, add=1)
cls_get_caller_info1.get_caller_info_m1(exp_stack=exp_stack,
capsys=capsys)
# call static method
update_stack(exp_stack=exp_stack, line_num=1563, add=1)
cls_get_caller_info1.get_caller_info_s1(exp_stack=exp_stack,
capsys=capsys)
# call class method
update_stack(exp_stack=exp_stack, line_num=1568, add=1)
ClassGetCallerInfo1.get_caller_info_c1(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class method
update_stack(exp_stack=exp_stack, line_num=1573, add=1)
cls_get_caller_info1.get_caller_info_m1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class static method
update_stack(exp_stack=exp_stack, line_num=1578, add=1)
cls_get_caller_info1.get_caller_info_s1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class class method
update_stack(exp_stack=exp_stack, line_num=1583, add=1)
ClassGetCallerInfo1.get_caller_info_c1bo(exp_stack=exp_stack,
capsys=capsys)
# call subclass method
cls_get_caller_info1s = ClassGetCallerInfo1S()
update_stack(exp_stack=exp_stack, line_num=1589, add=1)
cls_get_caller_info1s.get_caller_info_m1s(exp_stack=exp_stack,
capsys=capsys)
# call subclass static method
update_stack(exp_stack=exp_stack, line_num=1594, add=1)
cls_get_caller_info1s.get_caller_info_s1s(exp_stack=exp_stack,
capsys=capsys)
# call subclass class method
update_stack(exp_stack=exp_stack, line_num=1599, add=1)
ClassGetCallerInfo1S.get_caller_info_c1s(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass method
update_stack(exp_stack=exp_stack, line_num=1604, add=1)
cls_get_caller_info1s.get_caller_info_m1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass static method
update_stack(exp_stack=exp_stack, line_num=1609, add=1)
cls_get_caller_info1s.get_caller_info_s1bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass class method
update_stack(exp_stack=exp_stack, line_num=1614, add=1)
ClassGetCallerInfo1S.get_caller_info_c1bo(exp_stack=exp_stack,
capsys=capsys)
# call base method from subclass method
update_stack(exp_stack=exp_stack, line_num=1619, add=1)
cls_get_caller_info1s.get_caller_info_m1sb(exp_stack=exp_stack,
capsys=capsys)
# call base static method from subclass static method
update_stack(exp_stack=exp_stack, line_num=1624, add=1)
cls_get_caller_info1s.get_caller_info_s1sb(exp_stack=exp_stack,
capsys=capsys)
# call base class class method from subclass class method
update_stack(exp_stack=exp_stack, line_num=1629, add=1)
ClassGetCallerInfo1S.get_caller_info_c1sb(exp_stack=exp_stack,
capsys=capsys)
exp_stack.pop()
###########################################################################
# Class 0 Method 2
###########################################################################
def test_get_caller_info_helper(self,
capsys: pytest.CaptureFixture[str]
) -> None:
"""Get capsys for static methods.
Args:
capsys: Pytest fixture that captures output
"""
exp_stack: Deque[CallerInfo] = deque()
# already-present sequences are in the new segment
lmapping = [None] * len(conc)
rnew = []
for i in range(len(conc)):
j = align.find(conc.name(i), strict)
if j!=-1:
lmapping[i] = j
# checks the reverse (to find reciprocal matches when strict is false)
for i in range(len(align)):
j = conc.find(align.name(i), strict)
if j!=None:
if len(align.name(i))>len(conc.name(j)):
conc.name(j, align.name(i))
if lmapping[j]==None:
lmapping[j]=i
if j==None:
rnew.append(i)
# extends the old sequences
for i in range(len(conc)):
if lmapping[i]==None:
conc.appendSequence(i, ''.join([ch]*align.ls()))
else:
if groupCheck:
if conc.group(i) != align.group(lmapping[i]):
raise ValueError, 'group labels of sequence "%s" don\'t match between segments' %conc.name(i)
conc.appendSequence(i, align.sequence(lmapping[i]))
# adds new sequences
for i in rnew:
seq = ''.join([ch]*length) + align.sequence(i)
conc.append( align.name(i), seq, align.group(i) )
# consider spacing
if c<len(spacer):
# checks again
if (not isinstance(spacer[c], int) or spacer[c]<0):
raise ValueError, 'spacer argument of concat function must be a non-negative integer or a sequence of non-negative integers'
# appends spacer
for i in range(len(conc)):
conc.appendSequence(i, ''.join([ch] * spacer[c]))
# counts it
length+= spacer[c]
c+=1
length+=align.ls()
# convert to align
aConc = data.Align()
aConc.addSequences(conc)
return aConc
########################################################################
def genalys2fasta(iname):
"""
Converts Genalys-formatted sequence alignment files to fasta. The
function imports files generated through the option *Save SNPs* of
Genalys 2.8. *iname* is the name of the Genalys output file. Returns
an :class:`~egglib.Align` instance.
"""
file = open(iname,"r")
res = data.Align()
insertions = []
flag = False
for line in file:
line = line.split("\t")
if (len(line)>1 and line[0]=="Polymorphism"):
flag = True
if (len(line)>1 and line[0]=="IN" and flag):
insertions.extend(line[1].split("/"))
if len(insertions)>0:
tp = insertions[0].split("_")
if len(tp)==1:
tp = tp[0].split(".")
if len(tp)==1:
tp.append("1")
finsertions = [tp]
for i in insertions:
i= i.split("_")
if len(i)==1:
tp = tp[0].split(".")
if len(tp)==1:
i.append("1")
if i[0]!=finsertions[-1][0]:
finsertions.append(i)
finsertions[-1][1] = i[1]
if len(insertions)>0:
insertions = finsertions
file.close()
file = open(iname)
noms = []
sequences = []
maxlen = 0
for line in file:
line = line.split("\t")
if len(line)>1:
bidon= re.match(".+\.ab1$",line[1])
if bidon!=None:
noms.append(line[1])
sequences.append("")
index = 6
for i in range(10):
if line[i]=="F" or line[i]=="R":
index = i+1
break
if line[index]!="":
debut = int(line[index])-1
for i in insertions:
if int(i[0])<=debut:
debut= debut+ int(i[1])
else:
break
for i in range(debut):
sequences[-1]= sequences[-1]+ "?"
sequences[-1]= sequences[-1]+line[-1].rstrip("\n")
if len(sequences[-1])>maxlen:
maxlen = len(sequences[-1])
for i in range(len(sequences)):
sequences[i] = sequences[i].replace("_","-")
for j in range(len(sequences[i]),maxlen):
sequences[i]= sequences[i]+"?"
res.append(noms[i], sequences[i])
return res
########################################################################
def get_fgenesh(fname):
"""
Imports fgenesh output. *fname* must be the name of a file
containing fgenesh output. The feature definitions are parsed and
returned as a list of ``gene`` and ``CDS`` features represented by
dictionaries. Note that 5' partial features might not be in the
appropriate frame and that it can be necessary to add a ``codon_start``
qualifier.
"""
# checking
if not os.path.isfile(fname):
raise Exception, 'error, file not found: %s' %fname
return []
# define the locus name
locus = os.path.basename(fname).split('.')[0]
# import the raw data
f = open(fname)
data = f.read()
f.close()
# supports for mac/windows files
data= data.replace('\r\n', '\n')
data= data.replace('\r', '\n')
# gets the feature table
try:
data_sub= data.split(' G Str Feature Start End Score ORF Len\n')[1].split('Predicted protein(s):\n')[0]
except IndexError:
raise ValueError, 'invalid fgenesh format: %s' %fname
data_sub= data_sub.split('\n\n')
# edit
del data_sub[-1]
data_sub[0]= '\n'.join(data_sub[0].split('\n')[1:])
# iteratively grabs the features
features = {}
for i in data_sub:
pos = []
start = 1
rank = '---'
strand = '---'
for j in i.split('\n'):
a =re.search(' ?[0-9]+ ([+|-]) (TSS|PolA) +([0-9]+)', j)
b =re.search(' ?([0-9]+) ([+|-]) + ([0-9])+ CDS(o|f|i|l) +([0-9]+) - +([0-9]+) +[-\.0-9]+ + ([0-9]+)', j)
if b:
if (b.group(3)=="1"):
if (int(b.group(5))==int(b.group(7))): start= 1
elif (int(b.group(5))==(int(b.group(7))-1)): start= 2
elif (int(b.group(5))==(int(b.group(7))-2)): start= 3
else: sys.exit('error in file '+fname)
pos.append( [int(b.group(5))-1, int(b.group(6))-1 ] )
rank = b.group(1)
if (b.group(2)=='+'): strand = 'plus'
else: strand = 'minus'
features['cds'+rank] ={
'gene': locus+'_'+rank,
'strand': strand,
'pos': pos,
'type': 'CDS',
'note': 'fgenesh prediction'
}
features['gene'+rank] ={
'gene': locus+'_'+rank,
'strand': strand,
'pos': [[ pos[0][0], pos[-1][1] ]],
'type': 'gene',
'note': 'fgenesh prediction'
}
# gets the sequence section
try:
data_sub= data.split(' G Str Feature Start End Score ORF Len\n')[1].split('Predicted protein(s):\n')[1].split('>')
except IndexError:
raise ValueError, 'invalid fgenesh format: %s' %fname
del data_sub[0]
if ( (2*len(data_sub)!=len(features)) and
(len(data_sub)!=len(features)) ) : raise IOError, 'cannot import %s' %fname
# returns the sequences as a table
return [ features[i] for i in features ]
########################################################################
class Mase(list):
"""
Minimal implementation of the mase format (allowing input/output
operations). This class emulates a list of dictionaries, each
dictionary representing a sequence and describing the keys *header*,
*name* and *sequence*. However, the string formatter (``str(mase)``
or ``print mase``, where *mase* is a :class:`~egglib.tools.Mase` instance)
generates a mase-formatted string. Object attributes are *header*
(a string with file-level information), *species* (the species of
the ingroup), *align* (an :class:`~egglib.Align` instance corresponding
to the data contained in the instance, and created upon construction).
Modifying this instance has no effect.
"""
def __init__(self, input=None):
"""
The constructor takes an optional argument that can be a string
giving the path to a mase-formatted file, or a :class:`~egglib.Align`
instance. The constructor is currently unable to import
population labels, and only sequences marked as *ingroup* are
imported.
.. versionchanged:: 2.0.1
An :class:`IOError` is raised upon file formatting error.
"""
if isinstance(input, basestring):
self._parse(input)
elif isinstance(input, data.Align):
self._import(input)
elif input==None:
self.header= ''
self.species=None
self.align= data.Align()
else:
raise ValueError, 'mase initialized with invalid type'
####################################################################
def _parse(self, fname):
self.header= ''
self.align= data.Align()
if not os.path.isfile(fname):
raise IOError, 'cannot open '+fname
f=open(fname)
stuff= f.read()
f.close()
# foreign file support
stuff= stuff.replace('\r\n', '\n')
stuff= stuff.replace('\r', '\n')
stuff= stuff.split('\n')
stuff= [ i+'\n' for i in stuff ]
while (stuff[0][:2]==';;'):
self.header+= stuff[0][2:]
del stuff[0]
stuff = ''.join(stuff)
stuff= stuff.split(';')
if not len(stuff):
raise IOError, 'error in mase file '+fname
del stuff[0]
buff = []
for i in stuff:
i= i.split('\n')
if (len(i)<3):
self.clear()
self.header= ''
raise IOError, 'error in mase file '+fname
buff.append({'header': i[0],
'name': i[1],
'sequence': ''.join(i[2:]).upper()})
obj= re.search('@ of species = (\d+) INGROUP_(.+)\n([0-9,]+)',self.header)
indices = map(int, obj.group(3).split(','))
if len(indices) != int(obj.group(1)):
raise IOError, 'error in mase file '+fname
if len(indices)>len(buff):
raise IOError, 'error in mase file '+fname
self.species = obj.group(2).strip()
for i in indices:
if i-1>=len(buff):
raise IOError, 'error in mase file '+fname
self.align.append( buff[i-1]['name'], buff[i-1]['sequence'] )
self.append(buff[i-1])
####################################################################
def _import(self, alignment):
self.header= ''
self.align= data.Align()
for i in alignment:
self.append({'header': '',
'name': i[0],
'sequence': i[1]})
self.align.append(*i)
####################################################################
def __str__(self):
string= StringIO.StringIO()
string.write(';;%s\n' %self.header.strip())
for i in self:
string.write(';%s\n%s\n' %(i['header'].strip(), i['name']))
c=0
for j in i['sequence']:
string.write(j)
c+=1
if (c==60):
string.write('\n')
c=0
string.write('\n')
return string.getvalue()
########################################################################
class ReadingFrame:
"""
Handles reading frame positions.
"""
def __init__(self, frame):
"""
*frame* must be a sequence of ``(start, stop, codon_start)``
sequences where *start* and *stop* gives the first and last
position of an exon and *codon_start* is 1 if the first position
of the exon is the first position of a codon (e.g. ``ATG ATG``),
2 if the first position of the segment is the second position of
a codon (e.g. ``TG ATG``), 3 if the first position of the
segment is the third position a of codon (e.g. ``G ATG``), or
``None`` if the reading frame is continuing the previous exon.
If *codon_start* of the first segment is ``None``, 1 will be
assumed. It is not possible to modify the codon positions held
by the instance after construction.
"""
self._frame = []
try:
for start,stop,codon_start in frame:
self._frame.append((start,stop,codon_start)) # deep copy
except (ValueError, TypeError):
raise ValueError, 'invalid reading frame specification'
self._cached_codons = self.codons()
####################################################################
def exon(self, x):
"""
Returns the exon index of a position. Returns ``None`` if the
position falls outside specified segments (out of ranges or
in introns).
"""
for i,v in enumerate(self._frame):
if x>=v[0] and x<=v[1]: return i
return None
####################################################################
def codon(self, x):
"""
If the position ``x`` falls in a complete codon, returns the
| |
town, Florida",1359),
("North River Shores CDP, Florida",3663),
("North Sarasota CDP, Florida",8997),
("North Weeki Wachee CDP, Florida",7853),
("Oak Hill city, Florida",1775),
("Oakland town, Florida",2953),
("Oakland Park city, Florida",44339),
("Oakleaf Plantation CDP, Florida",27252),
("Oak Ridge CDP, Florida",23712),
("Ocala city, Florida",58598),
("Ocean Breeze town, Florida",171),
("Ocean City CDP, Florida",5949),
("Ocean Ridge town, Florida",1648),
("Ocoee city, Florida",44829),
("Odessa CDP, Florida",8517),
("Ojus CDP, Florida",17462),
("Okahumpka CDP, Florida",146),
("Okeechobee city, Florida",5638),
("Oldsmar city, Florida",14444),
("Olga CDP, Florida",2324),
("Olympia Heights CDP, Florida",14536),
("Ona CDP, Florida",739),
("Opa-locka city, Florida",16420),
("Orange City city, Florida",11448),
("Orange Park town, Florida",8680),
("Orangetree CDP, Florida",4271),
("Orchid town, Florida",438),
("Oriole Beach CDP, Florida",1880),
("Orlando city, Florida",275690),
("Orlovista CDP, Florida",6574),
("Ormond Beach city, Florida",41907),
("Ormond-by-the-Sea CDP, Florida",7322),
("Osprey CDP, Florida",7028),
("Otter Creek town, Florida",53),
("Oviedo city, Florida",39599),
("Pace CDP, Florida",23749),
("Page Park CDP, Florida",387),
("Pahokee city, Florida",6167),
("Paisley CDP, Florida",1338),
("Palatka city, Florida",10320),
("Palm Bay city, Florida",110271),
("Palm Beach town, Florida",8667),
("Palm Beach Gardens city, Florida",54528),
("Palm Beach Shores town, Florida",1128),
("Palm City CDP, Florida",25225),
("Palm Coast city, Florida",83940),
("Palmetto city, Florida",13490),
("Palmetto Bay village, Florida",24564),
("Palmetto Estates CDP, Florida",15970),
("Palm Harbor CDP, Florida",61995),
("Palmona Park CDP, Florida",1234),
("Palm River-Clair Mel CDP, Florida",23627),
("Palm Shores town, Florida",1181),
("Palm Springs village, Florida",24675),
("Palm Springs North CDP, Florida",5515),
("Palm Valley CDP, Florida",21562),
("Panacea CDP, Florida",1001),
("Panama City city, Florida",36880),
("Panama City Beach city, Florida",12683),
("Paradise Heights CDP, Florida",539),
("Parker city, Florida",4584),
("Parkland city, Florida",30471),
("Pasadena Hills CDP, Florida",10216),
("Patrick AFB CDP, Florida",1506),
("Paxton town, Florida",733),
("Pea Ridge CDP, Florida",4540),
("Pebble Creek CDP, Florida",8284),
("Pelican Bay CDP, Florida",5844),
("Pembroke Park town, Florida",6625),
("Pembroke Pines city, Florida",168260),
("Penney Farms town, Florida",678),
("Pensacola city, Florida",52562),
("Perry city, Florida",6925),
("Pierson town, Florida",1423),
("Pine Air CDP, Florida",2691),
("Pine Castle CDP, Florida",11643),
("Pinecrest village, Florida",19446),
("Pine Hills CDP, Florida",75575),
("Pine Island CDP, Florida",13),
("Pine Island Center CDP, Florida",2002),
("Pine Lakes CDP, Florida",537),
("Pineland CDP, Florida",309),
("Pine Level CDP, Florida",136),
("Pinellas Park city, Florida",52291),
("Pine Manor CDP, Florida",5248),
("Pine Ridge CDP (Citrus County), Florida",10279),
("Pine Ridge CDP (Collier County), Florida",1809),
("Pinewood CDP, Florida",17027),
("Pioneer CDP, Florida",582),
("Pittman CDP, Florida",246),
("Plantation city, Florida",92775),
("Plantation CDP, Florida",5022),
("Plantation Island CDP, Florida",295),
("Plantation Mobile Home Park CDP, Florida",1522),
("Plant City city, Florida",38119),
("Poinciana CDP, Florida",67169),
("Point Baker CDP, Florida",3460),
("Polk City town, Florida",2203),
("Pomona Park town, Florida",1046),
("Pompano Beach city, Florida",108855),
("Ponce de Leon town, Florida",497),
("Ponce Inlet town, Florida",3192),
("Port Charlotte CDP, Florida",61204),
("Port LaBelle CDP, Florida",5000),
("Port Orange city, Florida",61601),
("Port Richey city, Florida",2788),
("Port St. Joe city, Florida",3465),
("Port St. John CDP, Florida",11729),
("Port St. Lucie city, Florida",183762),
("Port Salerno CDP, Florida",11317),
("Pretty Bayou CDP, Florida",3674),
("Princeton CDP, Florida",30852),
("Progress Village CDP, Florida",10136),
("Punta Gorda city, Florida",19123),
("Punta Rassa CDP, Florida",1908),
("Quail Ridge CDP, Florida",2031),
("Quincy city, Florida",7512),
("Raiford town, Florida",224),
("Raleigh CDP, Florida",482),
("Reddick town, Florida",590),
("Redington Beach town, Florida",1397),
("Redington Shores town, Florida",2262),
("Richmond Heights CDP, Florida",11410),
("Richmond West CDP, Florida",37965),
("Ridgecrest CDP, Florida",3401),
("Ridge Manor CDP, Florida",4606),
("Ridge Wood Heights CDP, Florida",4140),
("Rio CDP, Florida",831),
("Rio Pinar CDP, Florida",5354),
("River Park CDP, Florida",6818),
("River Ridge CDP, Florida",5191),
("Riverview CDP, Florida",91190),
("Riviera Beach city, Florida",34352),
("Rockledge city, Florida",26906),
("Roeville CDP, Florida",638),
("Roosevelt Gardens CDP, Florida",2585),
("Roseland CDP, Florida",1269),
("Rotonda CDP, Florida",8921),
("Royal Palm Beach village, Florida",38344),
("Royal Palm Estates CDP, Florida",3485),
("Ruskin CDP, Florida",23477),
("Safety Harbor city, Florida",17663),
("St. Augustine city, Florida",14223),
("St. Augustine Beach city, Florida",6883),
("St. Augustine Shores CDP, Florida",9418),
("St. Augustine South CDP, Florida",5750),
("St. Cloud city, Florida",49108),
("St. George Island CDP, Florida",730),
("St. James City CDP, Florida",3437),
("St. Leo town, Florida",1164),
("St. Lucie Village town, Florida",713),
("St. Marks city, Florida",224),
("St. Pete Beach city, Florida",9572),
("St. Petersburg city, Florida",259041),
("Samoset CDP, Florida",4309),
("Samsula-Spruce Creek CDP, Florida",5272),
("San Antonio city, Florida",1336),
("San Carlos Park CDP, Florida",17778),
("San Castle CDP, Florida",3917),
("Sanford city, Florida",58862),
("Sanibel city, Florida",7224),
("Sarasota city, Florida",56102),
("Sarasota Springs CDP, Florida",15781),
("Satellite Beach city, Florida",10721),
("Sawgrass CDP, Florida",4894),
("Schall Circle CDP, Florida",1446),
("Sea Ranch Lakes village, Florida",546),
("Sebastian city, Florida",24536),
("Sebring city, Florida",10636),
("Seffner CDP, Florida",8786),
("Seminole city, Florida",18542),
("Seminole Manor CDP, Florida",2889),
("Seville CDP, Florida",612),
("Sewall's Point town, Florida",2102),
("Shady Hills CDP, Florida",12435),
("Shalimar town, Florida",892),
("Sharpes CDP, Florida",3049),
("Siesta Key CDP, Florida",5667),
("Silver Lake CDP, Florida",1719),
("Silver Springs Shores CDP, Florida",8966),
("Sky Lake CDP, Florida",6119),
("Sneads town, Florida",1898),
("Solana CDP, Florida",904),
("Sopchoppy city, Florida",407),
("Sorrento CDP, Florida",343),
("South Apopka CDP, Florida",6081),
("South Bay city, Florida",5116),
("South Beach CDP, Florida",3538),
("South Bradenton CDP, Florida",25646),
("South Brooksville CDP, Florida",3944),
("Southchase CDP, Florida",15096),
("South Daytona city, Florida",12734),
("Southeast Arcadia CDP, Florida",7813),
("Southgate CDP, Florida",7338),
("South Gate Ridge CDP, Florida",6328),
("South Highpoint CDP, Florida",5335),
("South Miami city, Florida",12202),
("South Miami Heights CDP, Florida",37861),
("South Palm Beach town, Florida",1347),
("South Pasadena city, Florida",5071),
("South Patrick Shores CDP, Florida",6802),
("South Sarasota CDP, Florida",5316),
("South Venice CDP, Florida",14419),
("Southwest Ranches town, Florida",7880),
("Springfield city, Florida",9475),
("Spring Hill CDP, Florida",107855),
("Springhill CDP, Florida",93),
("Spring Lake CDP, Florida",589),
("Spring Ridge CDP, Florida",423),
("Stacey Street CDP, Florida",798),
("Starke city, Florida",5375),
("Steinhatchee CDP, Florida",737),
("Stock Island CDP, Florida",4404),
("Stuart city, Florida",16100),
("Sugarmill Woods CDP, Florida",8851),
("Sumatra CDP, Florida",266),
("Sun City Center CDP, Florida",23480),
("Suncoast Estates CDP, Florida",4314),
("Sunny Isles Beach city, Florida",22171),
("Sunrise city, Florida",93199),
("Sunset CDP, Florida",15770),
("Surfside town, Florida",5817),
("Sweetwater city, Florida",21027),
("Taft CDP, Florida",1472),
("Tallahassee city, Florida",190180),
("Tamarac city, Florida",64748),
("Tamiami CDP, Florida",57514),
("Tampa city, Florida",376345),
("Tangelo Park CDP, Florida",2755),
("Tangerine CDP, Florida",2403),
("Tarpon Springs city, Florida",24974),
("Tavares city, Florida",16199),
("Tavernier CDP, Florida",2260),
("Taylor Creek CDP, Florida",4189),
("Temple Terrace city, Florida",26062),
("Tequesta village, Florida",6011),
("The Acreage CDP, Florida",39656),
("The Crossings CDP, Florida",23558),
("The Hammocks CDP, Florida",60473),
("The Meadows CDP, Florida",4346),
("The Villages CDP, Florida",77609),
("Thonotosassa CDP, Florida",13966),
("Three Lakes CDP, Florida",16043),
("Three Oaks CDP, Florida",4314),
("Tice CDP, Florida",5142),
("Tierra Verde CDP, Florida",3557),
("Tiger Point CDP, Florida",3122),
("Tildenville CDP, Florida",1199),
("Timber Pines CDP, Florida",5303),
("Titusville city, Florida",45483),
("Town 'n' Country CDP, Florida",85620),
("Treasure Island city, Florida",6875),
("Trenton city, Florida",2211),
("Trilby CDP, Florida",354),
("Trinity CDP, Florida",10646),
("Tyndall AFB CDP, Florida",3374),
("Umatilla city, Florida",3693),
("Union Park CDP, Florida",13254),
("University CDP (Hillsborough County), Florida",46359),
("University CDP (Orange County), Florida",36086),
("University Park CDP, Florida",26317),
("Upper Grand Lagoon CDP, Florida",17045),
("Valparaiso city, Florida",5035),
("Valrico CDP, Florida",38169),
("Vamo CDP, Florida",4862),
("Venice city, Florida",22601),
("Venice Gardens CDP, Florida",8370),
("Vernon city, Florida",685),
("Vero Beach city, Florida",16558),
("Vero Beach South CDP, Florida",24082),
("Verona Walk CDP, Florida",2334),
("Viera East CDP, Florida",11183),
("Viera West CDP, Florida",11116),
("Villano Beach CDP, Florida",2890),
("Villas CDP, Florida",12036),
("Vineyards CDP, Florida",3929),
("Virginia Gardens village, Florida",2504),
("Wabasso CDP, Florida",585),
("Wabasso Beach CDP, Florida",1839),
("Wacissa CDP, Florida",349),
("Wahneta CDP, Florida",5484),
("Waldo city, Florida",840),
("Wallace CDP, Florida",2005),
("Warm Mineral Springs CDP, Florida",5149),
("Warrington CDP, Florida",14487),
("Washington Park CDP, Florida",1799),
("Watergate CDP, Florida",2810),
("Watertown CDP, Florida",3507),
("Wauchula city, Florida",4857),
("Waukeenah CDP, Florida",179),
("Wausau town, Florida",422),
("Waverly CDP, Florida",922),
("Webster city, Florida",671),
("Wedgefield CDP, Florida",7701),
("Weeki Wachee city, Florida",2),
("Weeki Wachee Gardens CDP, Florida",1539),
("Wekiwa Springs CDP, Florida",23329),
("Welaka town, Florida",677),
("Wellington village, Florida",63681),
("Wesley Chapel CDP, Florida",55046),
("West Bradenton CDP, Florida",4324),
("Westchase CDP, Florida",23636),
("Westchester CDP, Florida",30210),
("West DeLand CDP, Florida",4310),
("Westgate CDP, Florida",10026),
("Westlake city, Florida",2),
("West Lealman CDP, Florida",15076),
("West Little River CDP, Florida",33396),
("West Melbourne city, Florida",21471),
("West Miami city, Florida",7373),
("Weston city, Florida",70220),
("West Palm Beach city, Florida",108365),
("West Park city, Florida",14924),
("West Pensacola CDP, Florida",21243),
("West Perrine CDP, Florida",10491),
("West Samoset CDP, Florida",7708),
("West Vero Corridor CDP, Florida",8661),
("Westview CDP, Florida",11068),
("Westville town, Florida",300),
("Westwood Lakes CDP, Florida",12121),
("Wewahitchka city, Florida",2342),
("Whiskey Creek CDP, Florida",5030),
("White City CDP, Florida",4566),
("White Springs town, Florida",755),
("Whitfield CDP (Manatee County), Florida",3504),
("Whitfield CDP (Santa Rosa County), Florida",202),
("Wildwood city, Florida",6557),
("Williamsburg CDP, Florida",7967),
("Williston city, Florida",2683),
("Williston Highlands CDP, Florida",1842),
("Willow Oak CDP, Florida",6930),
("Wilton Manors city, Florida",12534),
("Wimauma CDP, Florida",7621),
("Windermere town, Florida",3346),
("Windsor CDP, Florida",200),
("Winter Beach CDP, Florida",2340),
("Winter Garden city, Florida",42263),
("Winter Haven city, Florida",39615),
("Winter Park city, Florida",30189),
("Winter Springs city, Florida",35694),
("Wiscon CDP, Florida",699),
("Woodlawn Beach CDP, Florida",2844),
("Woodville CDP, Florida",2461),
("World Golf Village CDP, Florida",16538),
("Worthington Springs town, Florida",435),
("Wright CDP, Florida",22991),
("Yalaha CDP, Florida",1200),
("Yankeetown town, Florida",585),
("Yeehaw Junction CDP, Florida",320),
("Yulee CDP, Florida",12616),
("Zellwood CDP, Florida",3079),
("Zephyrhills city, Florida",14851),
("Zephyrhills North CDP, Florida",2599),
("Zephyrhills South CDP, Florida",5163),
("Zephyrhills West CDP, Florida",5281),
("Zolfo Springs town, Florida",1872),
("Abbeville city, Georgia",2792),
("Acworth city, Georgia",22336),
("Adairsville city, Georgia",4792),
("Adel city, Georgia",5290),
("Adrian city, Georgia",558),
("Ailey city, Georgia",563),
("Alamo town, Georgia",3362),
("Alapaha town, Georgia",814),
("Albany city, Georgia",74631),
("Aldora town, Georgia",123),
("Allenhurst city, Georgia",915),
("Allentown city, Georgia",123),
("Alma city, Georgia",3474),
("Alpharetta city, Georgia",64672),
("Alston town, Georgia",185),
("Alto town, Georgia",1308),
("Ambrose city, Georgia",354),
("Americus city, Georgia",15659),
("Andersonville city, Georgia",240),
("Appling CDP, Georgia",1010),
("Arabi town, Georgia",645),
("Aragon city, Georgia",1382),
("Arcade city, Georgia",1554),
("Argyle town, Georgia",217),
("Arlington city, Georgia",1656),
("Arnoldsville city, Georgia",429),
("Ashburn city, Georgia",3691),
("Athens-Clarke County unified government (balance), Georgia",123310),
("Atlanta city, Georgia",479655),
("Attapulgus city, Georgia",571),
("Auburn city, Georgia",7303),
("Augusta-Richmond County consolidated government (balance), Georgia",196807),
("Austell city, Georgia",7276),
("Avalon town, Georgia",185),
("Avera city, Georgia",269),
("Avondale Estates city, Georgia",3135),
("Baconton city, Georgia",1001),
("Bainbridge city, Georgia",12152),
("Baldwin city, Georgia",3482),
("Ball Ground city, Georgia",1993),
("Barnesville city, Georgia",6608),
("Bartow town, Georgia",268),
("Barwick city, Georgia",296),
("Baxley city, Georgia",4697),
("Bellville city, Georgia",145),
("Belvedere Park CDP, Georgia",15543),
("Berkeley Lake city, Georgia",1994),
("Berlin city, Georgia",679),
("Bethlehem town, Georgia",930),
("Between town, Georgia",328),
("Bishop town, Georgia",358),
("Blackshear city, Georgia",3485),
("Blairsville city, Georgia",625),
("Blakely city, Georgia",4690),
("Bloomingdale city, Georgia",2726),
("Blue Ridge city, Georgia",1242),
("Bluffton town, Georgia",68),
("Blythe city, Georgia",700),
("Bogart town, Georgia",1390),
("Bonanza CDP, Georgia",3421),
("Boston city, Georgia",1325),
("Bostwick city, Georgia",294),
("Bowdon city, Georgia",2134),
("Bowersville town, Georgia",465),
("Bowman city, Georgia",849),
("Boykin CDP, Georgia",52),
("Braselton town, Georgia",10509),
("Braswell town, Georgia",455),
("Bremen city, Georgia",6311),
("Brinson town, Georgia",226),
("Bronwood town, Georgia",271),
("Brookhaven city, Georgia",53140),
("Brooklet city, Georgia",1751),
("Brooks town, Georgia",509),
("Broxton city, Georgia",1254),
("Brunswick city, Georgia",16034),
("Buchanan city, Georgia",1435),
("Buckhead town, Georgia",155),
("Buena Vista city, Georgia",2104),
("Buford city, Georgia",15185),
("Butler city, Georgia",1989),
("Byromville town, Georgia",529),
("Byron city, Georgia",5108),
("Cadwell town, Georgia",489),
("Cairo city, Georgia",9528),
("Calhoun city, Georgia",16708),
("Calvary CDP, Georgia",110),
("Camak town, Georgia",117),
("Camilla city, Georgia",5000),
("Candler-McAfee CDP, Georgia",24022),
("Canon city, Georgia",1002),
("Canoochee CDP, Georgia",125),
("Canton city, Georgia",27127),
("Carl town, Georgia",235),
("Carlton city, Georgia",429),
("Carnesville city, Georgia",743),
("Carrollton city, Georgia",26397),
("Cartersville city, Georgia",20467),
("Cave Spring city, Georgia",1128),
("Cecil city, Georgia",343),
("Cedar Springs CDP, Georgia",122),
("Cedartown city, Georgia",9930),
("Centerville city, Georgia",7671),
("Centralhatchee town, Georgia",455),
("Chamblee city, Georgia",28748),
("Chatsworth city, Georgia",4271),
("Chattahoochee Hills city, Georgia",2867),
("Chattanooga Valley CDP, Georgia",3727),
("Chauncey city, Georgia",369),
("Cherry Log CDP, Georgia",47),
("Chester town, Georgia",2121),
("Chickamauga city, Georgia",3145),
("Clarkesville city, Georgia",1706),
("Clarkston city, Georgia",12762),
("Claxton city, Georgia",2359),
("Clayton city, Georgia",1838),
("Clermont town, Georgia",902),
("Cleveland city, Georgia",3834),
("Climax city, Georgia",178),
("Cobbtown city, Georgia",366),
("Cochran city, Georgia",4869),
("Cohutta city, Georgia",539),
("Colbert city, Georgia",624),
("Coleman CDP, Georgia",19),
("College Park city, Georgia",14819),
("Collins city, Georgia",573),
("Colquitt city, Georgia",2194),
("Columbus city, Georgia",196670),
("Comer city, Georgia",1320),
("Commerce city, Georgia",6742),
("Concord city, Georgia",298),
("Conley CDP, Georgia",6002),
("Conyers city, Georgia",15882),
("Coolidge city, Georgia",416),
("Cordele city, Georgia",10793),
("Cornelia city, Georgia",4290),
("Country Club Estates CDP, Georgia",8160),
("Covington city, Georgia",13844),
("Crawford city, Georgia",943),
("Crawfordville city, Georgia",564),
("Crooked Creek CDP, Georgia",497),
("Culloden | |
<reponame>ekkipermana/robotframework-test
"""PyAlaCarte and PyAlaMode editors."""
__author__ = "<NAME> <<EMAIL>>"
__cvsid__ = "$Id: editor.py 63478 2010-02-13 22:59:44Z RD $"
__revision__ = "$Revision: 63478 $"[11:-2]
import wx
from buffer import Buffer
import crust
import dispatcher
import editwindow
import frame
from shell import Shell
import version
class EditorFrame(frame.Frame):
"""Frame containing one editor."""
def __init__(self, parent=None, id=-1, title='PyAlaCarte',
pos=wx.DefaultPosition, size=(800, 600),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE,
filename=None):
"""Create EditorFrame instance."""
frame.Frame.__init__(self, parent, id, title, pos, size, style)
self.buffers = {}
self.buffer = None # Current buffer.
self.editor = None
self._defaultText = title + ' - the tastiest Python editor.'
self._statusText = self._defaultText
self.SetStatusText(self._statusText)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self._setup()
if filename:
self.bufferCreate(filename)
def _setup(self):
"""Setup prior to first buffer creation.
Useful for subclasses."""
pass
def setEditor(self, editor):
self.editor = editor
self.buffer = self.editor.buffer
self.buffers[self.buffer.id] = self.buffer
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyAlaCarte'
text = 'Another fine, flaky program.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def OnClose(self, event):
"""Event handler for closing."""
for buffer in self.buffers.values():
self.buffer = buffer
if buffer.hasChanged():
cancel = self.bufferSuggestSave()
if cancel and event.CanVeto():
event.Veto()
return
self.Destroy()
def OnIdle(self, event):
"""Event handler for idle time."""
self._updateStatus()
if hasattr(self, 'notebook'):
self._updateTabText()
self._updateTitle()
event.Skip()
def _updateStatus(self):
"""Show current status information."""
if self.editor and hasattr(self.editor, 'getStatus'):
status = self.editor.getStatus()
text = 'File: %s | Line: %d | Column: %d' % status
else:
text = self._defaultText
if text != self._statusText:
self.SetStatusText(text)
self._statusText = text
def _updateTabText(self):
"""Show current buffer information on notebook tab."""
## suffix = ' **'
## notebook = self.notebook
## selection = notebook.GetSelection()
## if selection == -1:
## return
## text = notebook.GetPageText(selection)
## window = notebook.GetPage(selection)
## if window.editor and window.editor.buffer.hasChanged():
## if text.endswith(suffix):
## pass
## else:
## notebook.SetPageText(selection, text + suffix)
## else:
## if text.endswith(suffix):
## notebook.SetPageText(selection, text[:len(suffix)])
def _updateTitle(self):
"""Show current title information."""
title = self.GetTitle()
if self.bufferHasChanged():
if title.startswith('* '):
pass
else:
self.SetTitle('* ' + title)
else:
if title.startswith('* '):
self.SetTitle(title[2:])
def hasBuffer(self):
"""Return True if there is a current buffer."""
if self.buffer:
return True
else:
return False
def bufferClose(self):
"""Close buffer."""
if self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
self.bufferDestroy()
cancel = False
return cancel
def bufferCreate(self, filename=None):
"""Create new buffer."""
self.bufferDestroy()
buffer = Buffer()
self.panel = panel = wx.Panel(parent=self, id=-1)
panel.Bind (wx.EVT_ERASE_BACKGROUND, lambda x: x)
editor = Editor(parent=panel)
panel.editor = editor
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(editor.window, 1, wx.EXPAND)
panel.SetSizer(sizer)
panel.SetAutoLayout(True)
sizer.Layout()
buffer.addEditor(editor)
buffer.open(filename)
self.setEditor(editor)
self.editor.setFocus()
self.SendSizeEvent()
def bufferDestroy(self):
"""Destroy the current buffer."""
if self.buffer:
for editor in self.buffer.editors.values():
editor.destroy()
self.editor = None
del self.buffers[self.buffer.id]
self.buffer = None
self.panel.Destroy()
def bufferHasChanged(self):
"""Return True if buffer has changed since last save."""
if self.buffer:
return self.buffer.hasChanged()
else:
return False
def bufferNew(self):
"""Create new buffer."""
if self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
self.bufferCreate()
cancel = False
return cancel
def bufferOpen(self):
"""Open file in buffer."""
if self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
result = openSingle(directory=filedir)
if result.path:
self.bufferCreate(result.path)
cancel = False
return cancel
## def bufferPrint(self):
## """Print buffer."""
## pass
## def bufferRevert(self):
## """Revert buffer to version of file on disk."""
## pass
def bufferSave(self):
"""Save buffer to its file."""
if self.buffer.doc.filepath:
self.buffer.save()
cancel = False
else:
cancel = self.bufferSaveAs()
return cancel
def bufferSaveAs(self):
"""Save buffer to a new filename."""
if self.bufferHasChanged() and self.buffer.doc.filepath:
cancel = self.bufferSuggestSave()
if cancel:
return cancel
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
result = saveSingle(directory=filedir)
if result.path:
self.buffer.saveAs(result.path)
cancel = False
else:
cancel = True
return cancel
def bufferSuggestSave(self):
"""Suggest saving changes. Return True if user selected Cancel."""
result = messageDialog(parent=None,
message='%s has changed.\n'
'Would you like to save it first'
'?' % self.buffer.name,
title='Save current file?')
if result.positive:
cancel = self.bufferSave()
else:
cancel = result.text == 'Cancel'
return cancel
def updateNamespace(self):
"""Update the buffer namespace for autocompletion and calltips."""
if self.buffer.updateNamespace():
self.SetStatusText('Namespace updated')
else:
self.SetStatusText('Error executing, unable to update namespace')
class EditorNotebookFrame(EditorFrame):
"""Frame containing one or more editors in a notebook."""
def __init__(self, parent=None, id=-1, title='PyAlaMode',
pos=wx.DefaultPosition, size=(800, 600),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE,
filename=None):
"""Create EditorNotebookFrame instance."""
self.notebook = None
EditorFrame.__init__(self, parent, id, title, pos,
size, style, filename)
if self.notebook:
dispatcher.connect(receiver=self._editorChange,
signal='EditorChange', sender=self.notebook)
def _setup(self):
"""Setup prior to first buffer creation.
Called automatically by base class during init."""
self.notebook = EditorNotebook(parent=self)
intro = 'Py %s' % version.VERSION
import imp
module = imp.new_module('__main__')
import __builtin__
module.__dict__['__builtins__'] = __builtin__
namespace = module.__dict__.copy()
self.crust = crust.Crust(parent=self.notebook, intro=intro, locals=namespace)
self.shell = self.crust.shell
# Override the filling so that status messages go to the status bar.
self.crust.filling.tree.setStatusText = self.SetStatusText
# Override the shell so that status messages go to the status bar.
self.shell.setStatusText = self.SetStatusText
# Fix a problem with the sash shrinking to nothing.
self.crust.filling.SetSashPosition(200)
self.notebook.AddPage(page=self.crust, text='*Shell*', select=True)
self.setEditor(self.crust.editor)
self.crust.editor.SetFocus()
def _editorChange(self, editor):
"""Editor change signal receiver."""
self.setEditor(editor)
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyAlaMode'
text = 'Another fine, flaky program.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def _updateTitle(self):
"""Show current title information."""
pass
## title = self.GetTitle()
## if self.bufferHasChanged():
## if title.startswith('* '):
## pass
## else:
## self.SetTitle('* ' + title)
## else:
## if title.startswith('* '):
## self.SetTitle(title[2:])
def bufferCreate(self, filename=None):
"""Create new buffer."""
buffer = Buffer()
panel = wx.Panel(parent=self.notebook, id=-1)
panel.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: x)
editor = Editor(parent=panel)
panel.editor = editor
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(editor.window, 1, wx.EXPAND)
panel.SetSizer(sizer)
panel.SetAutoLayout(True)
sizer.Layout()
buffer.addEditor(editor)
buffer.open(filename)
self.setEditor(editor)
self.notebook.AddPage(page=panel, text=self.buffer.name, select=True)
self.editor.setFocus()
def bufferDestroy(self):
"""Destroy the current buffer."""
selection = self.notebook.GetSelection()
## print "Destroy Selection:", selection
if selection > 0: # Don't destroy the PyCrust tab.
if self.buffer:
del self.buffers[self.buffer.id]
self.buffer = None # Do this before DeletePage().
self.notebook.DeletePage(selection)
def bufferNew(self):
"""Create new buffer."""
self.bufferCreate()
cancel = False
return cancel
def bufferOpen(self):
"""Open file in buffer."""
filedir = ''
if self.buffer and self.buffer.doc.filedir:
filedir = self.buffer.doc.filedir
result = openMultiple(directory=filedir)
for path in result.paths:
self.bufferCreate(path)
cancel = False
return cancel
class EditorNotebook(wx.Notebook):
"""A notebook containing a page for each editor."""
def __init__(self, parent):
"""Create EditorNotebook instance."""
wx.Notebook.__init__(self, parent, id=-1, style=wx.CLIP_CHILDREN)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging, id=self.GetId())
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged, id=self.GetId())
self.Bind(wx.EVT_IDLE, self.OnIdle)
def OnIdle(self, event):
"""Event handler for idle time."""
self._updateTabText()
event.Skip()
def _updateTabText(self):
"""Show current buffer display name on all but first tab."""
size = 3
changed = ' **'
unchanged = ' --'
selection = self.GetSelection()
if selection < 1:
return
text = self.GetPageText(selection)
window = self.GetPage(selection)
if not window.editor:
return
if text.endswith(changed) or text.endswith(unchanged):
name = text[:-size]
else:
name = text
if name != window.editor.buffer.name:
text = window.editor.buffer.name
if window.editor.buffer.hasChanged():
if text.endswith(changed):
text = None
elif text.endswith(unchanged):
text = text[:-size] + changed
else:
text += changed
else:
if text.endswith(changed):
text = text[:-size] + unchanged
elif text.endswith(unchanged):
text = None
else:
text += unchanged
if text is not None:
self.SetPageText(selection, text)
self.Refresh() # Needed on Win98.
def OnPageChanging(self, event):
"""Page changing event handler."""
event.Skip()
def OnPageChanged(self, event):
"""Page changed event handler."""
new = event.GetSelection()
window = self.GetPage(new)
dispatcher.send(signal='EditorChange', sender=self,
editor=window.editor)
window.SetFocus()
event.Skip()
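
# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of how the notebook frame defined above could be
# launched from a script. It assumes a classic-wxPython environment in which
# this module's sibling imports (buffer, crust, shell, version, ...) resolve;
# the helper name below is hypothetical and is never called here.
def _run_editor_example(filename=None):
    """Create a wx application, show an EditorNotebookFrame and run it."""
    app = wx.App(False)                      # False: no stdout/stderr window
    frm = EditorNotebookFrame(filename=filename)
    frm.Show()
    app.MainLoop()
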
class EditorShellNotebookFrame(EditorNotebookFrame):
"""Frame containing a notebook containing EditorShellNotebooks."""
def __init__(self, parent=None, id=-1, title='PyAlaModeTest',
pos=wx.DefaultPosition, size=(600, 400),
style=wx.DEFAULT_FRAME_STYLE,
filename=None, singlefile=False):
"""Create EditorShellNotebookFrame instance."""
self._singlefile = singlefile
EditorNotebookFrame.__init__(self, parent, id, title, pos,
size, style, filename)
def _setup(self):
"""Setup prior to first buffer creation.
Called automatically by base class during init."""
if not self._singlefile:
self.notebook = EditorNotebook(parent=self)
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyAlaModePlus'
text = 'Another fine, flaky program.'
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def bufferCreate(self, filename=None):
"""Create new buffer."""
if self._singlefile:
self.bufferDestroy()
notebook = EditorShellNotebook(parent=self,
filename=filename)
self.notebook = notebook
else:
notebook = EditorShellNotebook(parent=self.notebook,
filename=filename)
self.setEditor(notebook.editor)
if not self._singlefile:
self.notebook.AddPage(page=notebook, text=self.buffer.name,
select=True)
self.editor.setFocus()
def bufferDestroy(self):
"""Destroy the current buffer."""
if self.buffer:
self.editor = None
del self.buffers[self.buffer.id]
self.buffer = None # Do this before DeletePage().
if self._singlefile:
self.notebook.Destroy()
self.notebook = None
else:
selection = self.notebook.GetSelection()
## print "Destroy Selection:", selection
self.notebook.DeletePage(selection)
def bufferNew(self):
"""Create new buffer."""
if self._singlefile and self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
self.bufferCreate()
cancel = False
return cancel
def bufferOpen(self):
"""Open file in buffer."""
if self._singlefile and self.bufferHasChanged():
cancel = self.bufferSuggestSave()
if cancel:
return cancel
        filedir
<filename>typings/bl_ui/space_view3d.py
import sys
import typing
import bpy_types
import bl_ui.properties_grease_pencil_common
import bl_ui.space_toolsystem_common
class BoneOptions:
def draw(self, context):
'''
'''
pass
class ShowHideMenu:
bl_label = None
''' '''
def draw(self, _context):
'''
'''
pass
class TOPBAR_MT_edit_armature_add(bpy_types.Menu, bpy_types._GenericUI):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TOPBAR_MT_edit_curve_add(bpy_types.Menu, bpy_types._GenericUI):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
bl_translation_context = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TOPBAR_PT_annotation_layers(
bpy_types.Panel, bpy_types._GenericUI,
bl_ui.properties_grease_pencil_common.AnnotationDataPanel):
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
bl_ui_units_x = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def draw_layers(self, context, layout, gpd):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TOPBAR_PT_gpencil_materials(
bl_ui.properties_grease_pencil_common.GreasePencilMaterialsPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
bl_ui_units_x = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TOPBAR_PT_gpencil_vertexcolor(
bl_ui.properties_grease_pencil_common.GreasePencilVertexcolorPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
bl_ui_units_x = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_HT_header(bpy_types.Header, bpy_types._GenericUI):
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_xform_template(self, layout, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_HT_tool_header(bpy_types.Header, bpy_types._GenericUI):
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
    def append(self,
model.with_bagua([optimizer], ...)
.. note::
Each process should be associated to a CUDA device using `torch.cuda.set_device()`,
before calling :meth:`init_process_group`. Otherwise you may encounter the
`fatal runtime error: Rust cannot catch foreign exceptions` error.
"""
global _default_pg
global _default_store
global _autotune_service_port
if _default_pg is not None:
raise RuntimeError("trying to initialize the default process group twice!")
if _default_store is not None:
raise RuntimeError("The default store has been initialized else where!")
if store is None:
timeout = timedelta(minutes=30)
store, _, _ = next(torch.distributed.rendezvous(url="env://", timeout=timeout))
store.set_timeout(timeout)
_default_store = store
else:
_default_store = store
_autotune_service_port = _find_free_bagua_service_port(_default_store)
if get_rank() == 0 and _autotune_server is None:
start_autotune_server(_autotune_service_port)
AUTOTUNE_SERVER_WAIT_TIME = 30
wait_time = get_autotune_server_wait_time()
# at least wait 30 seconds
if wait_time < AUTOTUNE_SERVER_WAIT_TIME:
wait_time = AUTOTUNE_SERVER_WAIT_TIME
start = time.time()
service_ready = False
while (time.time() - start) < wait_time:
client = get_hyperparameters_service_client()
service_ready = client.health_check()
if service_ready:
break
if not service_ready:
raise Exception("Warning! autotune service not ready after {} seconds. "
"You can adjust this duration through "
"`BAGUA_AUTOTUNE_SERVER_WAIT_TIME` environment variable.".format(wait_time))
# TODO remove the dependency on torch process group
if not dist.is_initialized():
torch.distributed.init_process_group(
backend="nccl",
store=_default_store,
rank=get_rank(),
world_size=get_world_size(),
) # fmt: off
_default_pg = new_group(stream=torch.cuda.Stream(priority=-1))
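
# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged illustration of the note in the docstring above: each
# process pins its CUDA device *before* calling init_process_group(). It
# assumes one process per GPU launched with the usual env:// variables
# (RANK, WORLD_SIZE, LOCAL_RANK, MASTER_ADDR, MASTER_PORT); the helper name
# is hypothetical and is never called here.
def _init_example():
    import os
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    torch.cuda.set_device(local_rank)   # must happen before init_process_group()
    init_process_group()                # rendezvous via the env:// store by default
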
def broadcast_nccl_unique_id(comm_key: str, root):
global _default_store
if get_rank() == root:
idstr = B.BaguaSingleCommunicatorPy.generate_nccl_unique_id_str()
_default_store.set(comm_key, idstr)
else:
idstr = _default_store.get(comm_key)
idstr = str(idstr, encoding="utf-8")
return idstr
class comm(object):
WORLD = object()
class CommMember(object):
# Alias to group.WORLD for backward compatibility
WORLD = comm.WORLD
NON_COMM_MEMBER = object()
def send(tensor: torch.Tensor, dst: int, comm: Optional[B.BaguaSingleCommunicatorPy] = None):
r"""Sends a tensor to :attr:`dst` synchronously.
Args:
tensor: Tensor to send.
dst: Destination rank.
comm: A handle of the Bagua communicator to work on. By default, the global
communicator of the default process group will be used.
"""
if _rank_not_in_comm(comm):
return
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.send(tensor.to_bagua_tensor().bagua_backend_tensor(), dst)
comm.cuda_stream.synchronize()
def recv(tensor: torch.Tensor, src: int, comm: Optional[B.BaguaSingleCommunicatorPy] = None):
r"""Receives a tensor synchronously.
Args:
tensor: Tensor to fill with received data.
src: Source rank.
comm: A handle of the Bagua communicator to work on. By default, the global
communicator of the default process group will be used.
"""
if _rank_not_in_comm(comm):
return
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.recv(tensor.to_bagua_tensor().bagua_backend_tensor(), src)
comm.cuda_stream.synchronize()
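
# --- Usage sketch (not part of the original module) -------------------------
# A hedged point-to-point example for the send()/recv() pair above: rank 0
# sends a tensor to rank 1 over the global communicator. It assumes the
# process group is already initialized, at least two ranks are running, and
# get_rank() is available from this module's imports; the helper name is
# hypothetical and is never called here.
def _send_recv_example():
    t = torch.ones(4, device="cuda") * get_rank()   # per-rank payload
    if get_rank() == 0:
        send(t, dst=1)     # blocking send on the default global communicator
    elif get_rank() == 1:
        recv(t, src=0)     # t is overwritten with rank 0's data (all zeros here)
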
def broadcast_coalesced(tensors, src=0, comm: Optional[B.BaguaSingleCommunicatorPy] = None):
if _rank_not_in_comm(comm):
return
for tensor in tensors:
assert tensor.device != torch.device(
"cpu"
), "input tensors must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
coalesced = flatten(tensors)
comm.broadcast(coalesced.to_bagua_tensor().bagua_backend_tensor(), src)
for buf, synced in zip(tensors, unflatten(coalesced, tensors)):
buf.copy_(synced)
# TODO: remove
comm.cuda_stream.synchronize()
# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This function is copied fron Hovorod: https://github.com/horovod/horovod
# with minor changes.
def broadcast_object(obj: object, src: int = 0, comm: Optional[B.BaguaSingleCommunicatorPy] = None) -> object:
"""Serializes and broadcasts an object from root rank to all other processes.
Typical usage is to broadcast the ``optimizer.state_dict()``, for example:
>>> state_dict = broadcast_object(optimizer.state_dict(), 0)
>>> if get_rank() > 0:
>>> optimizer.load_state_dict(state_dict)
Args:
obj: An object capable of being serialized without losing any context.
src: The rank of the process from which parameters will be broadcasted to all other processes.
comm: A handle of the Bagua communicator to work on. By default, the global
communicator of the default process group will be used.
Returns:
The object that was broadcasted from the :attr:`src`.
.. note::
This operation will move data to GPU before communication and back to CPU after communication, and it requires
CPU-GPU synchronization.
"""
if get_rank() == src:
b = io.BytesIO()
pickle.dump(obj, b)
t = torch.cuda.ByteTensor(bytearray(b.getvalue()))
# TODO: use IntTensor after int32 communication is supported
sz = torch.cuda.LongTensor([t.shape[0]])
broadcast(sz, src, comm)
else:
sz = torch.cuda.LongTensor([0])
broadcast(sz, src, comm)
t = torch.cuda.ByteTensor(sz.tolist()[0])
broadcast(t, src, comm)
if get_rank() != src:
buf = io.BytesIO(t.cpu().numpy().tobytes())
obj = pickle.load(buf)
return obj
def broadcast(tensor: torch.Tensor, src: int = 0, comm: Optional[B.BaguaSingleCommunicatorPy] = None):
r"""Broadcasts the tensor to all processes associated with the communicator.
:attr:`tensor` must have the same number of elements in all processes
participating in the collective.
Args:
tensor: Data to be sent if :attr:`src` is the rank of
current process, and tensor to be used to save received data
otherwise.
src: Source rank. Default: 0.
comm: A handle of the Bagua communicator to work on. By default, the global
communicator of the default process group will be used.
"""
if _rank_not_in_comm(comm):
return
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.broadcast(tensor.to_bagua_tensor().bagua_backend_tensor(), src)
# TODO: remove
comm.cuda_stream.synchronize()
def reduce(
send_tensor: torch.Tensor,
recv_tensor: torch.Tensor,
dst: int,
op: ReduceOp = ReduceOp.SUM,
comm: Optional[B.BaguaSingleCommunicatorPy] = None,
):
r"""Reduces the tensor data across all processes.
    Only the process with rank :attr:`dst` is going to receive the final result.
Args:
send_tensor: Input of the collective.
recv_tensor: Output of the collective, must have the same size with :attr:`send_tensor`.
dst: Destination rank.
op: One of the values from :class:`ReduceOp`
enum. Specifies an operation used for element-wise reductions.
comm: A handle of the Bagua communicator to work on. By default, the global
communicator of the default process group will be used.
"""
if _rank_not_in_comm(comm):
return
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
dst,
int(op),
)
comm.cuda_stream.synchronize()
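
# --- Usage sketch (not part of the original module) -------------------------
# A hedged example for reduce() above: every rank contributes a tensor and
# only the destination rank ends up with the element-wise sum. It assumes an
# initialized process group; the helper name is hypothetical and never called.
def _reduce_example():
    send_tensor = torch.ones(3, device="cuda") * (get_rank() + 1)
    recv_tensor = torch.zeros_like(send_tensor)
    reduce(send_tensor, recv_tensor, dst=0, op=ReduceOp.SUM)
    # On rank 0, recv_tensor now holds the sum of (rank + 1) over all ranks
    # in every element; on other ranks its contents are unspecified.
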
def reduce_inplace(
tensor: torch.Tensor, dst: int, op: ReduceOp = ReduceOp.SUM, comm: Optional[B.BaguaSingleCommunicatorPy] = None
):
r"""The in-place version of :func:`reduce`."""
if _rank_not_in_comm(comm):
return
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), dst, int(op)
)
comm.cuda_stream.synchronize()
def allreduce_coalesced_inplace(
tensors,
op: ReduceOp = ReduceOp.SUM,
comm: Optional[B.BaguaSingleCommunicatorPy] = None,
):
if _rank_not_in_comm(comm):
return
for tensor in tensors:
assert tensor.device != torch.device(
"cpu"
), "input tensors must be CUDA and dense"
if comm is None or comm is CommMember.WORLD:
comm = _get_default_group().get_global_communicator()
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
coalesced = flatten(tensors)
comm.allreduce_inplace(
coalesced.to_bagua_tensor("allreduce_coalesced"), int(op)
)
for buf, synced in zip(tensors, unflatten(coalesced, tensors)):
buf.copy_(synced)
# TODO: remove
comm.cuda_stream.synchronize()
def allreduce(
send_tensor: torch.Tensor,
recv_tensor: torch.Tensor,
op: ReduceOp = ReduceOp.SUM,
comm: Optional[B.BaguaSingleCommunicatorPy] = None,
):
"""Reduces the tensor data across all processes associated with the communicator in such a way that all get
the final result. After the call :attr:`recv_tensor` is going to be bitwise identical
in all processes.
Args:
send_tensor: Input of the collective.
recv_tensor: Output of the collective, must have the same size with :attr:`send_tensor`.
op: One of the values from :class:`ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm: A handle of the Bagua communicator to work on. By default, the global
communicator of the default process group will be used.
Examples::
>>> from bagua.torch_api import allreduce
>>>
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> send_tensor = torch.arange(2, dtype=torch.int64, device=tensor.device) + 1 + 2 * rank
>>> recv_tensor = torch.zeros(2, dtype=torch.int64, device=tensor.device)
>>> send_tensor
    tensor([1, 2],
sym(str name, Sparsity sp, int p) -> [DM]
        Create a vector of length p with matrices with symbolic primitives of
sym(str name, int nrow, int ncol, int p) -> [DM]
Create a vector of length p with nrow-by-ncol symbolic primitives.
sym(str name, Sparsity sp, int p, int r) -> [[DM]]
Create a vector of length r of vectors of length p with symbolic primitives
sym(str name, int nrow, int ncol, int p, int r) -> [[DM]]
symbolic primitives.
> sym(str name, (int,int) rc)
------------------------------------------------------------------------
Construct a symbolic primitive with given dimensions.
> sym(str name, int nrow, int ncol, int p)
------------------------------------------------------------------------
Create a vector of length p with nrow-by-ncol symbolic primitives.
> sym(str name, Sparsity sp, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with symbolic primitives
with given sparsity.
> sym(str name, int nrow, int ncol, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with nrow-by-ncol
symbolic primitives.
> sym(str name, Sparsity sp)
------------------------------------------------------------------------
Create symbolic primitive with a given sparsity pattern.
> sym(str name, int nrow, int ncol)
------------------------------------------------------------------------
Create an nrow-by-ncol symbolic primitive.
> sym(str name, Sparsity sp, int p)
------------------------------------------------------------------------
        Create a vector of length p with matrices with symbolic primitives of
given sparsity.
"""
return _casadi.GenDM_sym(*args)
sym = staticmethod(sym)
def zeros(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
zeros(int nrow, int ncol) -> DM
zeros((int,int) rc) -> DM
zeros(Sparsity sp) -> DM
zero.
"""
return _casadi.GenDM_zeros(*args)
zeros = staticmethod(zeros)
def ones(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
ones(int nrow, int ncol) -> DM
ones((int,int) rc) -> DM
ones(Sparsity sp) -> DM
one.
"""
return _casadi.GenDM_ones(*args)
ones = staticmethod(ones)
def __init__(self, *args):
"""
GenDM()
GenDM(GenDM other)
"""
this = _casadi.new_GenDM(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _casadi.delete_GenDM
GenDM_swigregister = _casadi.GenDM_swigregister
GenDM_swigregister(GenDM)
def GenDM_sym(*args):
"""
Create a vector of length r of vectors of length p with nrow-by-ncol
sym(str name, int nrow, int ncol) -> DM
Create an nrow-by-ncol symbolic primitive.
sym(str name, (int,int) rc) -> DM
Construct a symbolic primitive with given dimensions.
sym(str name, Sparsity sp) -> DM
Create symbolic primitive with a given sparsity pattern.
sym(str name, Sparsity sp, int p) -> [DM]
    Create a vector of length p with matrices with symbolic primitives of
sym(str name, int nrow, int ncol, int p) -> [DM]
Create a vector of length p with nrow-by-ncol symbolic primitives.
sym(str name, Sparsity sp, int p, int r) -> [[DM]]
Create a vector of length r of vectors of length p with symbolic primitives
sym(str name, int nrow, int ncol, int p, int r) -> [[DM]]
symbolic primitives.
> sym(str name, (int,int) rc)
------------------------------------------------------------------------
Construct a symbolic primitive with given dimensions.
> sym(str name, int nrow, int ncol, int p)
------------------------------------------------------------------------
Create a vector of length p with nrow-by-ncol symbolic primitives.
> sym(str name, Sparsity sp, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with symbolic primitives
with given sparsity.
> sym(str name, int nrow, int ncol, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with nrow-by-ncol
symbolic primitives.
> sym(str name, Sparsity sp)
------------------------------------------------------------------------
Create symbolic primitive with a given sparsity pattern.
> sym(str name, int nrow, int ncol)
------------------------------------------------------------------------
Create an nrow-by-ncol symbolic primitive.
> sym(str name, Sparsity sp, int p)
------------------------------------------------------------------------
    Create a vector of length p with matrices with symbolic primitives of
given sparsity.
"""
return _casadi.GenDM_sym(*args)
def GenDM_zeros(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
zeros(int nrow, int ncol) -> DM
zeros((int,int) rc) -> DM
zeros(Sparsity sp) -> DM
zero.
"""
return _casadi.GenDM_zeros(*args)
def GenDM_ones(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
ones(int nrow, int ncol) -> DM
ones((int,int) rc) -> DM
ones(Sparsity sp) -> DM
one.
"""
return _casadi.GenDM_ones(*args)
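
# --- Usage sketch (not part of the generated bindings) -----------------------
# A hedged illustration of the sym/zeros/ones constructors documented above,
# written against the public casadi API (SX/DM/Sparsity) rather than the
# internal Gen* helpers in this file; the helper name is hypothetical and is
# never called here.
def _sym_usage_example():
    from casadi import SX, DM, Sparsity
    x = SX.sym("x", 2, 3)             # 2-by-3 dense symbolic primitive
    z = DM.zeros(Sparsity.lower(3))   # 3-by-3 lower-triangular pattern of zeros
    o = DM.ones(2, 2)                 # dense 2-by-2 matrix of ones
    return x.nnz(), z.nnz(), o.numel()   # 6, 6 and 4 respectively
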
class GenSX(GenericMatrixCommon, SparsityInterfaceCommon):
"""
"""
__swig_setmethods__ = {}
for _s in [GenericMatrixCommon, SparsityInterfaceCommon]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GenSX, name, value)
__swig_getmethods__ = {}
for _s in [GenericMatrixCommon, SparsityInterfaceCommon]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GenSX, name)
__repr__ = _swig_repr
def nnz(self, *args):
"""
Get the number of (structural) non-zero elements.
nnz(self) -> int
"""
return _casadi.GenSX_nnz(self, *args)
def nnz_lower(self, *args):
"""
Get the number of non-zeros in the lower triangular half.
nnz_lower(self) -> int
"""
return _casadi.GenSX_nnz_lower(self, *args)
def nnz_upper(self, *args):
"""
Get the number of non-zeros in the upper triangular half.
nnz_upper(self) -> int
"""
return _casadi.GenSX_nnz_upper(self, *args)
def nnz_diag(self, *args):
"""
Get get the number of non-zeros on the diagonal.
nnz_diag(self) -> int
"""
return _casadi.GenSX_nnz_diag(self, *args)
def numel(self, *args):
"""
Get the number of elements.
numel(self) -> int
"""
return _casadi.GenSX_numel(self, *args)
def size1(self, *args):
"""
Get the first dimension (i.e. number of rows)
size1(self) -> int
"""
return _casadi.GenSX_size1(self, *args)
def rows(self, *args):
"""
Get the number of rows, Octave-style syntax.
rows(self) -> int
"""
return _casadi.GenSX_rows(self, *args)
def size2(self, *args):
"""
Get the second dimension (i.e. number of columns)
size2(self) -> int
"""
return _casadi.GenSX_size2(self, *args)
def columns(self, *args):
"""
Get the number of columns, Octave-style syntax.
columns(self) -> int
"""
return _casadi.GenSX_columns(self, *args)
def dim(self, *args):
"""
Get string representation of dimensions. The representation is e.g. "4x5"
dim(self, bool with_nz) -> str
or "4x5,10nz".
"""
return _casadi.GenSX_dim(self, *args)
def size(self, *args):
"""
Get the size along a particular dimensions.
size(self) -> (int,int)
Get the shape.
size(self, int axis) -> int
> size(self)
------------------------------------------------------------------------
Get the shape.
> size(self, int axis)
------------------------------------------------------------------------
Get the size along a particular dimensions.
"""
return _casadi.GenSX_size(self, *args)
def is_empty(self, *args):
"""
Check if the sparsity is empty, i.e. if one of the dimensions is zero (or
is_empty(self, bool both) -> bool
optionally both dimensions)
"""
return _casadi.GenSX_is_empty(self, *args)
def is_dense(self, *args):
"""
Check if the matrix expression is dense.
is_dense(self) -> bool
"""
return _casadi.GenSX_is_dense(self, *args)
def is_scalar(self, *args):
"""
Check if the matrix expression is scalar.
is_scalar(self, bool scalar_and_dense) -> bool
"""
return _casadi.GenSX_is_scalar(self, *args)
def is_square(self, *args):
"""
Check if the matrix expression is square.
is_square(self) -> bool
"""
return _casadi.GenSX_is_square(self, *args)
def is_vector(self, *args):
"""
Check if the matrix is a row or column vector.
is_vector(self) -> bool
"""
return _casadi.GenSX_is_vector(self, *args)
def is_row(self, *args):
"""
Check if the matrix is a row vector (i.e. size1()==1)
is_row(self) -> bool
"""
return _casadi.GenSX_is_row(self, *args)
def is_column(self, *args):
"""
Check if the matrix is a column vector (i.e. size2()==1)
is_column(self) -> bool
"""
return _casadi.GenSX_is_column(self, *args)
def is_triu(self, *args):
"""
Check if the matrix is upper triangular.
is_triu(self) -> bool
"""
return _casadi.GenSX_is_triu(self, *args)
def is_tril(self, *args):
"""
Check if the matrix is lower triangular.
is_tril(self) -> bool
"""
return _casadi.GenSX_is_tril(self, *args)
def row(self, *args):
"""
Get the sparsity pattern. See the Sparsity class for details.
row(self) -> [int]
row(self, int el) -> int
"""
return _casadi.GenSX_row(self, *args)
def colind(self, *args):
"""
Get the sparsity pattern. See the Sparsity class for details.
colind(self) -> [int]
colind(self, int col) -> int
"""
return _casadi.GenSX_colind(self, *args)
def sparsity(self, *args):
"""
Get the sparsity pattern.
sparsity(self) -> Sparsity
"""
return _casadi.GenSX_sparsity(self, *args)
def sym(*args):
"""
Create a vector of length r of vectors of length p with nrow-by-ncol
sym(str name, int nrow, int ncol) -> SX
Create an nrow-by-ncol symbolic primitive.
sym(str name, (int,int) rc) -> SX
Construct a symbolic primitive with given dimensions.
sym(str name, Sparsity sp) -> SX
Create symbolic primitive with a given sparsity pattern.
sym(str name, Sparsity sp, int p) -> [SX]
Create a vector of length p with matrices with symbolic primitives of
sym(str name, int nrow, int | |
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": "payment",
"date": payment.date.strftime(DATE_INPUT_FORMAT),
"total": 500
}
)
data.update(header_data)
invoices_to_match_against_orig = invoices
invoices_as_dicts = [ to_dict(invoice) for invoice in invoices ]
invoices_to_match_against = [ get_fields(invoice, ['type', 'ref', 'total', 'paid', 'due', 'id']) for invoice in invoices_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(invoices_to_match_against, {"id": "matched_to"}, {"value": 200})
matching_forms[-1]["value"] = 50
# Remember we are changing EXISTING instances so we need to post the id of the instance also
matching_forms[0]["id"] = matches[0].pk
matching_forms[1]["id"] = matches[1].pk
# So we are matching 200 + 200 + 50 = 450 in total against a 500.00 payment
matching_data = create_formset_data(match_form_prefix, matching_forms)
matching_data["match-INITIAL_FORMS"] = 2
data.update(matching_data)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
payment = PurchaseHeader.objects.get(pk=payment.pk)
self.assertEqual(payment.total, -500)
self.assertEqual(payment.due, -50)
self.assertEqual(payment.paid, -450)
invoices = PurchaseHeader.objects.filter(type="pi")
invoices = sort_multiple(invoices, *[ (lambda i : i.pk, False) ])
self.assertEqual(len(invoices), 3)
self.assertEqual(invoices[0].total, 1200)
self.assertEqual(invoices[0].due, 1000)
self.assertEqual(invoices[0].paid, 200)
self.assertEqual(invoices[1].total, 1200)
self.assertEqual(invoices[1].due, 1000)
self.assertEqual(invoices[1].paid, 200)
self.assertEqual(invoices[2].total, 1200)
self.assertEqual(invoices[2].due, 1150)
self.assertEqual(invoices[2].paid, 50)
matches = PurchaseMatching.objects.all()
matches = sort_multiple(matches, *[ (lambda m : m.pk, False) ])
self.assertEqual(len(matches), 3)
self.assertEqual(matches[0].matched_by, payment)
self.assertEqual(matches[0].matched_to, invoices[0])
self.assertEqual(matches[0].value, 200)
self.assertEqual(matches[1].matched_by, payment)
self.assertEqual(matches[1].matched_to, invoices[1])
self.assertEqual(matches[1].value, 200)
self.assertEqual(matches[2].matched_by, payment)
self.assertEqual(matches[2].matched_to, invoices[2])
self.assertEqual(matches[2].value, 50)
# test_8 but with new transactions
# INCORRECT USAGE
# Payment total is decreased
# Match value of the transaction is decreased, so it is ok on that header
# But now the match value total is not valid
# Example: a 100 payment is matched to a 200 payment and a 300 invoice.
# The payment is reduced to 80, and only 150.00 of the payment is now matched.
# This isn't allowed because an 80 + 150 payment obviously cannot pay a 300 invoice
# (see the arithmetic sketch below).
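# Arithmetic for the POST below: the edited payment total is 800, but the posted match
# values sum to 1000 + (-900) + (-900) = -800, which falls outside the allowed range of
# 0 to 800.00 (per the error message asserted at the end), so the form is rejected with
# status 200 and nothing is saved.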
def test_14(self):
self.client.force_login(self.user)
# IN FACT WE WILL JUST MATCH A PAYMENT TO A POSITIVE AND NEGATIVE INVOICE
# SET UP
payment = create_payments(self.supplier, 'payment', 1,self.period, value=1000)[0]
invoices = []
invoices += create_invoices(self.supplier, "invoice", 1,self.period, 2000)
invoices += create_invoices(self.supplier, "invoice", 1,self.period, -1000)
invoices += create_invoices(self.supplier, "invoice", 1,self.period, -1000)
invoices = sort_multiple(invoices, *[ (lambda i : i.pk, False) ])
match(payment, [ ( invoices[0], 2000 ), ( invoices[1], -1000) ])
headers = PurchaseHeader.objects.all()
headers = sort_multiple(headers, *[ (lambda h : h.pk, False) ])
self.assertEqual(len(headers), 4)
self.assertEqual(headers[0].pk, payment.pk)
self.assertEqual(headers[0].total, -1000)
self.assertEqual(headers[0].paid, -1000)
self.assertEqual(headers[0].due, 0)
self.assertEqual(headers[1].pk, invoices[0].pk)
self.assertEqual(headers[1].total, 2400)
self.assertEqual(headers[1].paid, 2000)
self.assertEqual(headers[1].due, 400)
self.assertEqual(headers[2].pk, invoices[1].pk)
self.assertEqual(headers[2].total, -1200)
self.assertEqual(headers[2].paid, -1000)
self.assertEqual(headers[2].due, -200)
self.assertEqual(headers[3].pk, invoices[2].pk)
self.assertEqual(headers[3].total, -1200)
self.assertEqual(headers[3].paid, 0)
self.assertEqual(headers[3].due, -1200)
matches = PurchaseMatching.objects.all()
matches = sort_multiple(matches, *[ (lambda m : m.pk, False) ])
self.assertEqual(len(matches), 2)
self.assertEqual(matches[0].matched_by, payment)
self.assertEqual(matches[0].matched_to, invoices[0])
self.assertEqual(matches[0].value, 2000)
self.assertEqual(matches[1].matched_by, payment)
self.assertEqual(matches[1].matched_to, invoices[1])
self.assertEqual(matches[1].value, -1000)
payment.refresh_from_db()
invoices = PurchaseHeader.objects.filter(type="pi")
invoices = sort_multiple(invoices, *[ (lambda i : i.pk, False) ])
url = reverse("purchases:edit", kwargs={"pk": payment.pk})
# CHANGES
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": "payment",
"date": payment.date.strftime(DATE_INPUT_FORMAT),
"total": 800
}
)
data.update(header_data)
invoices_to_match_against_orig = invoices
invoices_as_dicts = [ to_dict(invoice) for invoice in invoices ]
invoices_to_match_against = [ get_fields(invoice, ['type', 'ref', 'total', 'paid', 'due', 'id']) for invoice in invoices_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects([invoices_to_match_against[0]], {"id": "matched_to"}, {"value": 1000})
matching_forms += add_and_replace_objects([invoices_to_match_against[1]], {"id": "matched_to"}, {"value": -900})
matching_forms += add_and_replace_objects([invoices_to_match_against[2]], {"id": "matched_to"}, {"value": -900})
matching_forms[0]["id"] = matches[0].pk
matching_forms[1]["id"] = matches[1].pk
matching_data = create_formset_data(match_form_prefix, matching_forms)
matching_data["match-INITIAL_FORMS"] = 2
data.update(create_formset_data(LINE_FORM_PREFIX, []))
data.update(matching_data)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
payment = PurchaseHeader.objects.get(pk=payment.pk)
self.assertEqual(payment.total, -1000)
self.assertEqual(payment.due, 0)
self.assertEqual(payment.paid, -1000)
invoices = PurchaseHeader.objects.filter(type="pi")
invoices = sort_multiple(invoices, *[ (lambda i : i.pk, False) ])
self.assertEqual(len(invoices), 3)
self.assertEqual(invoices[0].total, 2400)
self.assertEqual(invoices[0].due, 400)
self.assertEqual(invoices[0].paid, 2000)
self.assertEqual(invoices[1].total, -1200)
self.assertEqual(invoices[1].due, -200)
self.assertEqual(invoices[1].paid, -1000)
self.assertEqual(invoices[2].total, -1200)
self.assertEqual(invoices[2].due, -1200)
self.assertEqual(invoices[2].paid, 0)
matches = PurchaseMatching.objects.all()
matches = sort_multiple(matches, *[ (lambda m : m.pk, False) ])
self.assertEqual(len(matches), 2)
self.assertEqual(matches[0].matched_by, payment)
self.assertEqual(matches[0].matched_to, invoices[0])
self.assertEqual(matches[0].value, 2000)
self.assertEqual(matches[1].matched_by, payment)
self.assertEqual(matches[1].matched_to, invoices[1])
self.assertEqual(matches[1].value, -1000)
self.assertContains(
response,
'<li class="py-1">Please ensure the total of the transactions you are matching is between 0 and 800.00</li>',
html=True
)
# CORRECT USAGE
# WE INCREASE THE MATCH VALUE OF THE MATCH TRANSACTION WHERE THE HEADER BEING EDITED
# IS THE MATCHED_TO HEADER
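# In the data below the second match row is posted with value 800 from the edited payment's
# point of view; because the payment is the matched_to header on that match, it is saved as
# -800, so invoice[1]'s paid figure moves from 600 to 800 and the payment becomes fully paid.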
def test_15(self):
self.client.force_login(self.user)
# create the payment
payment = create_payments(self.supplier, 'payment', 1,self.period, value=1000)[0]
# create the invoice - THIS IS WHAT WE ARE EDITING
invoices = []
invoices += create_invoices(self.supplier, "invoice", 2,self.period, 1000)
# SECOND INVOICE
invoices = sort_multiple(invoices, *[ (lambda i : i.pk, False) ])
match_by, match_to = match(payment, [ (invoices[0], 200) ] ) # FIRST MATCH
payment = match_by
match_by, match_to = match(invoices[1], [ (payment, -600) ]) # SECOND MATCH
headers = PurchaseHeader.objects.all()
headers = sort_multiple(headers, *[ (lambda h : h.pk, False) ])
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0].pk, payment.pk)
self.assertEqual(headers[0].total, -1000)
self.assertEqual(headers[0].paid, -800)
self.assertEqual(headers[0].due, -200)
self.assertEqual(headers[1].pk, invoices[0].pk)
self.assertEqual(headers[1].total, 1200)
self.assertEqual(headers[1].paid, 200)
self.assertEqual(headers[1].due, 1000)
self.assertEqual(headers[2].pk, invoices[1].pk)
self.assertEqual(headers[2].total, 1200)
self.assertEqual(headers[2].paid, 600)
self.assertEqual(headers[2].due, 600)
matches = PurchaseMatching.objects.all()
matches = sort_multiple(matches, *[ (lambda m : m.pk, False) ])
self.assertEqual(len(matches), 2)
self.assertEqual(matches[0].matched_by, payment)
self.assertEqual(matches[0].matched_to, invoices[0])
self.assertEqual(matches[0].value, 200)
self.assertEqual(matches[1].matched_by, invoices[1])
self.assertEqual(matches[1].matched_to, payment)
self.assertEqual(matches[1].value, -600)
url = reverse("purchases:edit", kwargs={"pk": payment.pk})
# CHANGES
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": headers[0].ref,
"date": headers[0].date.strftime(DATE_INPUT_FORMAT),
"total": 1000
}
)
data.update(header_data)
matching_trans = [ invoices[0], invoices[1] ]
matching_trans_as_dicts = [ to_dict(m) for m in matching_trans ]
matching_trans = [ get_fields(m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects([matching_trans[0]], {"id": "matched_to"}, {"value": 200}) # THIS IS LIKE ALL THE OTHER TESTS
matching_forms[0]["id"] = matches[0].pk
# THIS IS THE DIFFERENCE
matching_trans[1]["id"] = matches[1].pk
matching_trans[1]["matched_to"] = payment.pk # THIS IS NOT NEEDED FOR VALIDATION LOGIC BUT IS A REQUIRED FIELD
matching_trans[1]["value"] = 800
matching_forms.append(matching_trans[1])
matching_data = create_formset_data(match_form_prefix, matching_forms)
matching_data["match-INITIAL_FORMS"] = 2
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(line_data)
data.update(matching_data)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all()
headers = sort_multiple(headers, *[ (lambda h : h.pk, False) ])
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0].pk, payment.pk)
self.assertEqual(headers[0].total, -1000)
self.assertEqual(headers[0].paid, -1000)
self.assertEqual(headers[0].due, 0)
self.assertEqual(headers[1].pk, invoices[0].pk)
self.assertEqual(headers[1].total, 1200)
self.assertEqual(headers[1].paid, 200)
self.assertEqual(headers[1].due, 1000)
self.assertEqual(headers[2].pk, invoices[1].pk)
self.assertEqual(headers[2].total, 1200)
self.assertEqual(headers[2].paid, 800)
self.assertEqual(headers[2].due, 400)
matches = PurchaseMatching.objects.all()
matches = sort_multiple(matches, *[ (lambda m : m.pk, False) ])
self.assertEqual(len(matches), 2)
self.assertEqual(matches[0].matched_by, payment)
self.assertEqual(matches[0].matched_to, invoices[0])
self.assertEqual(matches[0].value, 200)
self.assertEqual(matches[1].matched_by, invoices[1])
self.assertEqual(matches[1].matched_to, payment)
self.assertEqual(matches[1].value, -800)
# CORRECT USAGE
# WE DECREASE THE MATCH VALUE OF THE MATCH TRANSACTION WHERE THE HEADER BEING EDITED
# IS THE MATCHED_TO HEADER
def test_16(self):
self.client.force_login(self.user)
# create the payment
payment = create_payments(self.supplier, 'payment', 1,self.period, value=1000)[0]
# create the invoice - THIS IS WHAT WE ARE EDITING
invoices = []
invoices += create_invoices(self.supplier, "invoice", 2,self.period, 1000)
# SECOND INVOICE
invoices = sort_multiple(invoices, *[ (lambda i : i.pk, False) ])
match_by, match_to = match(payment, [ (invoices[0], 200) ] ) # FIRST MATCH
payment = match_by
match_by, match_to = match(invoices[1], [ (payment, -600) ]) # SECOND MATCH
headers = PurchaseHeader.objects.all()
headers = sort_multiple(headers, *[ (lambda h : h.pk, False) ])
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0].pk, payment.pk)
self.assertEqual(headers[0].total, -1000)
self.assertEqual(headers[0].paid, -800)
self.assertEqual(headers[0].due, -200)
self.assertEqual(headers[1].pk, invoices[0].pk)
self.assertEqual(headers[1].total, 1200)
self.assertEqual(headers[1].paid, 200)
self.assertEqual(headers[1].due, 1000)
self.assertEqual(headers[2].pk, invoices[1].pk)
self.assertEqual(headers[2].total, 1200)
self.assertEqual(headers[2].paid, 600)
self.assertEqual(headers[2].due, 600)
matches = PurchaseMatching.objects.all()
matches = sort_multiple(matches, *[ (lambda m : m.pk, False) ])
self.assertEqual(len(matches), 2)
self.assertEqual(matches[0].matched_by, payment)
self.assertEqual(matches[0].matched_to, invoices[0])
self.assertEqual(matches[0].value, 200)
self.assertEqual(matches[1].matched_by, invoices[1])
self.assertEqual(matches[1].matched_to, payment)
self.assertEqual(matches[1].value, -600)
url = reverse("purchases:edit", kwargs={"pk": payment.pk})
# CHANGES
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": headers[0].ref,
"date": headers[0].date.strftime(DATE_INPUT_FORMAT),
"total": 1000
}
)
data.update(header_data)
matching_trans = [ invoices[0], invoices[1] ]
matching_trans_as_dicts = [ to_dict(m) for m in matching_trans ]
matching_trans = [ get_fields(m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects([matching_trans[0]], {"id": "matched_to"}, {"value": 200}) # THIS IS LIKE ALL THE OTHER TESTS
matching_forms[0]["id"] = matches[0].pk
# THIS IS THE DIFFERENCE
matching_trans[1]["id"] = matches[1].pk
matching_trans[1]["matched_to"] = payment.pk # THIS IS NOT NEEDED FOR VALIDATION LOGIC BUT IS A REQUIRED FIELD
matching_trans[1]["value"] = 0
matching_forms.append(matching_trans[1])
matching_data = create_formset_data(match_form_prefix, matching_forms)
matching_data["match-INITIAL_FORMS"] = 2
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(line_data)
data.update(matching_data)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all()
headers = sort_multiple(headers, *[ | |
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class indEntEd
class indEtt(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, indEtt)
if subclass is not None:
return subclass(*args_, **kwargs_)
if indEtt.subclass:
return indEtt.subclass(*args_, **kwargs_)
else:
return indEtt(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='indEtt', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('indEtt')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='indEtt')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='indEtt', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='indEtt'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='indEtt', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class indEtt
class nrRegEtt(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrRegEtt)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nrRegEtt.subclass:
return nrRegEtt.subclass(*args_, **kwargs_)
else:
return nrRegEtt(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='nrRegEtt', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrRegEtt')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nrRegEtt')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='nrRegEtt', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrRegEtt'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='nrRegEtt', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class nrRegEtt
class dadosIsencao(GeneratedsSuper):
"""Informações Complementares - Empresas Isentas - Dados da Isenção"""
subclass = None
superclass = None
def __init__(self, ideMinLei=None, nrCertif=None, dtEmisCertif=None, dtVencCertif=None, nrProtRenov=None, dtProtRenov=None, dtDou=None, pagDou=None):
self.original_tagname_ = None
self.ideMinLei = ideMinLei
self.nrCertif = nrCertif
if isinstance(dtEmisCertif, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtEmisCertif, '%Y-%m-%d').date()
else:
initvalue_ = dtEmisCertif
self.dtEmisCertif = initvalue_
if isinstance(dtVencCertif, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtVencCertif, '%Y-%m-%d').date()
else:
initvalue_ = dtVencCertif
self.dtVencCertif = initvalue_
self.nrProtRenov = nrProtRenov
if isinstance(dtProtRenov, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtProtRenov, '%Y-%m-%d').date()
else:
initvalue_ = dtProtRenov
self.dtProtRenov = initvalue_
if isinstance(dtDou, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(dtDou, '%Y-%m-%d').date()
else:
initvalue_ = dtDou
self.dtDou = initvalue_
self.pagDou = pagDou
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, dadosIsencao)
if subclass is not None:
return subclass(*args_, **kwargs_)
if dadosIsencao.subclass:
return dadosIsencao.subclass(*args_, **kwargs_)
else:
return dadosIsencao(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ideMinLei(self): return self.ideMinLei
def set_ideMinLei(self, ideMinLei): self.ideMinLei = ideMinLei
def get_nrCertif(self): return self.nrCertif
def set_nrCertif(self, nrCertif): self.nrCertif = nrCertif
def get_dtEmisCertif(self): return self.dtEmisCertif
def set_dtEmisCertif(self, dtEmisCertif): self.dtEmisCertif = dtEmisCertif
def get_dtVencCertif(self): return self.dtVencCertif
def set_dtVencCertif(self, dtVencCertif): self.dtVencCertif = dtVencCertif
def get_nrProtRenov(self): return self.nrProtRenov
def set_nrProtRenov(self, nrProtRenov): self.nrProtRenov = nrProtRenov
def get_dtProtRenov(self): return self.dtProtRenov
def set_dtProtRenov(self, dtProtRenov): self.dtProtRenov = dtProtRenov
def get_dtDou(self): return self.dtDou
def set_dtDou(self, dtDou): self.dtDou = dtDou
def get_pagDou(self): return self.pagDou
def set_pagDou(self, pagDou): self.pagDou = pagDou
def hasContent_(self):
if (
self.ideMinLei is not None or
self.nrCertif is not None or
self.dtEmisCertif is not None or
self.dtVencCertif is not None or
self.nrProtRenov is not None or
self.dtProtRenov is not None or
self.dtDou is not None or
self.pagDou is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='dadosIsencao', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('dadosIsencao')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='dadosIsencao')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='dadosIsencao', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dadosIsencao'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='dadosIsencao', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ideMinLei is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sideMinLei>%s</%sideMinLei>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.ideMinLei), input_name='ideMinLei')), namespace_, eol_))
if self.nrCertif is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrCertif>%s</%snrCertif>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrCertif), input_name='nrCertif')), namespace_, eol_))
if self.dtEmisCertif is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtEmisCertif>%s</%sdtEmisCertif>%s' % (namespace_, self.gds_format_date(self.dtEmisCertif, input_name='dtEmisCertif'), namespace_, eol_))
if self.dtVencCertif is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtVencCertif>%s</%sdtVencCertif>%s' % (namespace_, self.gds_format_date(self.dtVencCertif, input_name='dtVencCertif'), namespace_, eol_))
if self.nrProtRenov is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snrProtRenov>%s</%snrProtRenov>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrProtRenov), input_name='nrProtRenov')), namespace_, eol_))
if self.dtProtRenov is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtProtRenov>%s</%sdtProtRenov>%s' % (namespace_, self.gds_format_date(self.dtProtRenov, input_name='dtProtRenov'), namespace_, eol_))
if self.dtDou is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdtDou>%s</%sdtDou>%s' % (namespace_, self.gds_format_date(self.dtDou, input_name='dtDou'), namespace_, eol_))
if self.pagDou is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spagDou>%s</%spagDou>%s' % (namespace_, self.gds_format_integer(self.pagDou, input_name='pagDou'), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ideMinLei':
ideMinLei_ = child_.text
ideMinLei_ = self.gds_validate_string(ideMinLei_, node, 'ideMinLei')
self.ideMinLei = ideMinLei_
elif nodeName_ == 'nrCertif':
nrCertif_ = child_.text
nrCertif_ = self.gds_validate_string(nrCertif_, node, 'nrCertif')
self.nrCertif = nrCertif_
elif nodeName_ == 'dtEmisCertif':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtEmisCertif = dval_
elif nodeName_ == 'dtVencCertif':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtVencCertif = dval_
elif nodeName_ == 'nrProtRenov':
nrProtRenov_ = child_.text
nrProtRenov_ = self.gds_validate_string(nrProtRenov_, node, 'nrProtRenov')
self.nrProtRenov = nrProtRenov_
elif nodeName_ == 'dtProtRenov':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtProtRenov = dval_
elif nodeName_ == 'dtDou':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.dtDou = dval_
elif nodeName_ == 'pagDou':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'pagDou')
self.pagDou = ival_
# end class dadosIsencao
class ideMinLei(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ideMinLei)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ideMinLei.subclass:
return ideMinLei.subclass(*args_, **kwargs_)
else:
return ideMinLei(*args_, **kwargs_)
factory = staticmethod(factory)
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ideMinLei', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ideMinLei')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ideMinLei')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ideMinLei', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ideMinLei'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ideMinLei', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ideMinLei
class nrCertif(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self):
self.original_tagname_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nrCertif)
if subclass is not None:
return subclass(*args_, **kwargs_)
| |
import discord
from discord.ext import commands
from . import utils
from .utils import data_type, wiki, checks, config, modding, token
import json
from bs4 import BeautifulSoup as BS
import re
import traceback
from urllib.parse import quote
import hashlib
#==================================================================================================================================================
INF = float("inf")
GFWIKI_BASE = "https://iopwiki.com"
GFWIKI_API = f"{GFWIKI_BASE}/api.php"
GFLANALYSIS_BASE = "https://www.gflanalysis.com"
GFLANALYSIS_API = f"{GFLANALYSIS_BASE}/w/api.php"
MOBILITY = {
"AR": 10,
"SMG": 12,
"HG": 15,
"RF": 7,
"MG": 4,
"SG": 6
}
CRIT_RATE = {
"AR": 20,
"SMG": 5,
"HG": 20,
"RF": 40,
"MG": 5,
"SG": 20
}
AMMO_COST = {
"AR": (20, 60),
"SMG": (30, 90),
"HG": (10, 30),
"RF": (30, 90),
"MG": (40, 140),
"SG": (30, 90)
}
RATION_COST = {
"AR": (20, 60),
"SMG": (20, 60),
"HG": (10, 30),
"RF": (15, 55),
"MG": (30, 90),
"SG": (40, 140)
}
ARMOR_PENETRATION = 15
STANDARD_EQUIPMENTS = {
"AR": {
"accessory": ["telescopic_sight", "red_dot_sight", "holographic_sight", "silencer", "night_equipment"],
"magazine": ["hv_ammo"],
"doll": ["exoskeleton", "chip"]
},
"SMG": {
"accessory": ["telescopic_sight", "red_dot_sight", "holographic_sight", "silencer", "night_equipment"],
"magazine": ["hp_ammo"],
"doll": ["exoskeleton", "chip"]
},
"HG": {
"accessory": ["silencer", "night_equipment"],
"magazine": ["hp_ammo"],
"doll": ["exoskeleton", "chip"]
},
"RF": {
"accessory": ["telescopic_sight", "red_dot_sight", "holographic_sight", "silencer"],
"magazine": ["ap_ammo"],
"doll": ["camo_cape", "chip"]
},
"MG": {
"accessory": ["telescopic_sight", "red_dot_sight", "holographic_sight"],
"magazine": ["ap_ammo"],
"doll": ["ammo_box", "chip"]
},
"SG": {
"accessory": ["telescopic_sight", "red_dot_sight", "holographic_sight", "night_equipment"],
"magazine": ["shotgun_ammo"],
"doll": ["armor_plate", "chip"]
},
}
EQUIPMENT_ORDER = {
"AR": ["accessory", "magazine", "doll"],
"SMG": ["doll", "magazine", "accessory"],
"HG": ["accessory", "magazine", "doll"],
"RF": ["magazine", "accessory", "doll"],
"MG": ["magazine", "accessory", "doll"],
"SG": ["doll", "magazine", "accessory"]
}
MOD_RARITY = {
"2": 4,
"3": 4,
"4": 5,
"5": 6
}
STATS_NORMALIZE = {
"clipsize": "clip_size",
"nightpenaltyreduction": "peq",
"criticaldamage": "crit_dmg",
"criticalhitdamage": "crit_dmg",
"armorpiercing": "armor_penetration",
"armorpenetration": "armor_penetration",
"armourpenetration": "armor_penetration",
"armourpiercing": "armor_penetration",
"evasion": "evasion",
"criticalhitrate": "crit_rate",
"criticalhitchance": "crit_rate",
"targets": "shotgun_ammo",
"rateoffire": "rof",
"accuracy": "accuracy",
"armor": "armor",
"armour": "armor",
"movementspeed": "mobility",
"damage": "damage",
"boostabilityeffectiveness":"boost_skill_effect"
}
STAT_DISPLAY = {
"damage": ("DMG", ""),
"rof": ("ROF", ""),
"accuracy": ("ACC", ""),
"evasion": ("EVA", ""),
"crit_rate": ("CRIT RATE", "%"),
"crit_dmg": ("CRIT DMG", "%"),
"armor": ("ARMOR", ""),
"clip_size": ("ROUNDS", ""),
"armor_penetration": ("AP", ""),
"mobility": ("MOBILITY", ""),
"peq": ("NIGHT VISION", "%"),
"shotgun_ammo": ("TARGETS", "")
}
def get_equipment_slots(classification, order, *, add_ap=False, add_armor=False):
eq = STANDARD_EQUIPMENTS[classification]
ret = []
for i in order:
if add_ap and i == "magazine":
e = eq[i] + ["ap_ammo"]
elif add_armor and i == "doll":
e = eq[i] + ["armor_plate"]
else:
e = eq[i]
ret.append(e)
return ret
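# e.g. get_equipment_slots("MG", EQUIPMENT_ORDER["MG"]) ->
#   [["ap_ammo"], ["telescopic_sight", "red_dot_sight", "holographic_sight"], ["ammo_box", "chip"]]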
wiki_timer_regex = re.compile(r"(\d{1,2})\:(\d{2})\:(\d{2})")
def timer_to_seconds(s):
m = wiki_timer_regex.match(s)
if m:
return int(m.group(1)) * 3600 + int(m.group(2)) * 60
else:
return None
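# Note: only the hour and minute groups are used (the trailing seconds are dropped), e.g.
# timer_to_seconds("05:12:30") == 5*3600 + 12*60 == 18720; non-matching strings return None.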
timer_regex = re.compile(r"(\d{0,2})\:?(\d{2})")
def get_either(container, *keys, default=None):
for key in keys:
try:
return container[key]
except (IndexError, KeyError):
pass
else:
return default
def mod_keys(key):
return "mod3_" + key, "mod2_" + key, "mod1_" + key, key
name_clean_regex = re.compile(r"[\.\-\s\/]")
def normalize(s):
return s.replace("\u2215", "/")
shorten_regex = re.compile(r"(submachine\s?gun|assault\s?rifle|rifle|hand\s?gun|machine\sgun|shotgun)s?\s*(?:\(\w{2,3}\))?", re.I)
def shorten_repl(m):
base = m.group(1).lower()
if base.startswith("sub"):
return "SMG"
elif base.startswith("assault"):
return "AR"
elif base.startswith("hand"):
return "HG"
elif base.startswith("machine"):
return "MG"
elif base.startswith("rifle"):
return "RF"
elif base.startswith("shot"):
return "SG"
else:
return m.group(0)
def shorten_types(text):
return shorten_regex.sub(shorten_repl, text)
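# e.g. shorten_types("Submachine Guns (SMG)") -> "SMG", shorten_types("assault rifle") -> "AR"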
def to_float(any_obj, *, default=None):
try:
return float(any_obj)
except:
return default
def generate_image_url(filename, *, base=GFWIKI_BASE):
return f"{base}/{wiki.generate_image_path(filename)}"
#==================================================================================================================================================
parser = wiki.WikitextParser()
@parser.set_box_handler("PlayableUnit")
@parser.set_box_handler("Equipment")
@parser.set_box_handler("Fairy")
def handle_base_box(box, **kwargs):
return kwargs
@parser.set_box_handler("voice actor name")
@parser.set_box_handler("artist name")
@parser.set_box_handler("icon")
def handle_creator(box, name):
return name
@parser.set_box_handler("doll_server_alias")
def handle_alias(box, *, server, alias):
if server == "EN":
return alias
else:
return ""
@parser.set_box_handler("doll name")
@parser.set_box_handler("equip name")
@parser.set_box_handler("enemy name")
@parser.set_box_handler("fairy name")
def handle_name(box, name, *args, **kwargs):
return name
@parser.set_box_handler("HG aura")
@parser.set_box_handler("HG_aura")
def handle_hg_aura(box, value):
return value + "%"
@parser.set_box_handler("spoiler")
def handle_spoiler(box, value):
return f"||{value}||"
@parser.set_box_handler("cite")
@parser.set_box_handler("cite ab1")
@parser.set_box_handler("stub")
@parser.set_box_handler("wip")
@parser.set_box_handler("Cleanup")
def handle_misc(box, *args, **kwargs):
return ""
@parser.set_box_handler(None)
def default_handler(box, *args, **kwargs):
raise ValueError(f"Handler for {box} doesn't exist.")
@parser.set_reference_handler
def handle_reference(box, *args, **kwargs):
if box.startswith(":Category:"):
return box[10:]
else:
return box
@parser.set_html_handler
def handle_html(tag, text, **kwargs):
if tag == "ref":
return ""
elif kwargs.get("class") == "spoiler":
return "||" + "".join(parser.parse(text)) + "||"
else:
return text
def maybe_int(nstr, default=0):
try:
n = float(nstr)
except (ValueError, TypeError):
return default
else:
intn = int(n)
if n - intn == 0:
return intn
else:
return n
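# e.g. maybe_int("3.0") -> 3, maybe_int("2.5") -> 2.5, maybe_int(None) -> 0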
skill_regex = re.compile(r"\(\$(\w+)\)")
simple_br_regex = re.compile(r"\<br\s*\/\>")
@parser.set_table_handler("gf-table")
def handle_skill_table(class_, table):
parsed = {r[0]: r[1:] for r in table}
effect = skill_regex.sub(lambda m: parsed[m.group(1)][9], parsed["text"][-1])
effect = simple_br_regex.sub(lambda m: "\n", effect)
return {
"name": parsed["name"][-1],
"effect": effect,
"icd": maybe_int(parsed.get("initial", [None])[-1]),
"cd": maybe_int(parsed.get("cooldown", [None])[-1])
}
#==================================================================================================================================================
gflanalysis_parser = wiki.WikitextParser()
@gflanalysis_parser.set_html_handler
def handle_gfla_html(tag, text, **kwargs):
if tag == "sup":
return ""
else:
return text
@gflanalysis_parser.set_reference_handler
def handle_gfla_reference(box, *args, **kwargs):
if box.startswith("Category:"):
return ""
else:
return box
#==================================================================================================================================================
class Doll(data_type.BaseObject):
@property
def qual_name(self):
return self.en_name or self.name
def _base_info(self, ctx):
emojis = ctx.cog.emojis
embeds = []
for skill_effect in utils.split_page(self.skill["effect"], 900, check=lambda s: s=="\n", fix=" \u27a1 "):
embed = discord.Embed(
title=f"#{self.index} {self.qual_name}",
color=discord.Color.green(),
url=f"{GFWIKI_BASE}/wiki/{quote(self.name)}"
)
embed.add_field(name="Classification", value=f"{emojis[self.classification]} **{self.classification}**")
embed.add_field(name="Rarity", value=str(emojis["rank"])*utils.to_int(self.rarity, default=0) or "**EXTRA**")
embed.add_field(
name="Production time",
value=f"{self.craft_time//3600}:{self.craft_time%3600//60:0>2d}" if self.craft_time else "Non-craftable",
inline=False
)
embed.add_field(
name="Stats",
value=
f"{emojis['hp']}**HP:** {self.max_hp} (x5)\n"
f"{emojis['damage']}**DMG:** {self.max_dmg}\n"
f"{emojis['accuracy']}**ACC:** {self.max_acc}"
+
(f"\n{emojis['armor']}**ARMOR:** {self.max_armor}" if self.max_armor > 0 else "")
)
embed.add_field(
name="\u200b",
value=
f"{emojis['rof']}**ROF:** {self.max_rof}\n"
f"{emojis['evasion']}**EVA:** {self.max_eva}\n"
f"{emojis['crit_rate']}**CRIT RATE:** {self.crit_rate}%"
+
(f"\n{emojis['clip_size']}**ROUNDS:** {self.clip_size}" if self.clip_size > 0 else "")
)
tile = {
k: emojis["blue_square"] if v==1 else emojis["white_square"] if v==0 else emojis["black_square"]
for k, v in self.tile["shape"].items()
}
embed.add_field(
name="Tile",
value=
f"\u200b {tile['7']}{tile['8']}{tile['9']}\u2001{shorten_types(self.tile['target'])}\n"
f"\u200b {tile['4']}{tile['5']}{tile['6']}\u2001{self.tile['effect'][0]}\n"
f"\u200b {tile['1']}{tile['2']}{tile['3']}\u2001{self.tile['effect'][1]}",
inline=False
)
skill = self.skill
icd = f"Initial CD: {skill['icd']}s" if skill["icd"] else None
cd = f"CD: {skill['cd']}s" if skill["cd"] else None
if cd or icd:
add = " (" + "/".join(filter(None, (icd, cd))) + ")"
else:
add = ""
embed.add_field(
name="Skill",
value=
f"**{skill['name']}**{add}\n"
f"{skill_effect}",
inline=False
)
embeds.append(embed)
return embeds
def _other_info(self, ctx):
embeds = []
for trivia in utils.split_page(self.trivia, 1000, check=lambda s: s=="\n", fix=" \u27a1 "):
embed = discord.Embed(
title=f"#{self.index} {self.qual_name}",
color=discord.Color.green(),
url=f"{GFWIKI_BASE}/wiki/{quote(self.name)}"
)
embed.add_field(name="Full name", value=self.full_name or "?")
embed.add_field(name="Origin", value=self.origin)
embed.add_field(name="Illustrator", value=self.artist or "None")
embed.add_field(name="Voice Actor", value=self.voice_actor or "None")
embed.add_field(name="Trivia", value=trivia or "None", inline=False)
embeds.append(embed)
return embeds
def _mod_info(self, ctx):
emojis = ctx.cog.emojis
mod = self.mod_data
embeds = []
for skill_index in range(2):
for i, skill_effect in enumerate(utils.split_page(mod["skill"][skill_index]["effect"], 1000, check=lambda s:s=="\n", fix=" \u27a1 ")):
while i > len(embeds) - 1:
embed = discord.Embed(
title=f"#{self.index} {self.en_name or self.name} Mod",
color=discord.Color.green(),
url=f"{GFWIKI_BASE}/wiki/{quote(self.name)}"
)
embed.add_field(name="Classification", value=f"{emojis[self.classification]} **{self.classification}**")
embed.add_field(name="Rarity", value=str(emojis["rank"])*MOD_RARITY[self.rarity])
embed.add_field(
name="Production time",
value=f"{self.craft_time//3600}:{self.craft_time%3600//60:0>2d}" if self.craft_time else "Non-craftable",
inline=False
)
embed.add_field(
name="Stats",
value=
f"{emojis['hp']}**HP:** {mod['max_hp']} (x5)\n"
f"{emojis['damage']}**DMG:** {mod['max_dmg']}\n"
f"{emojis['accuracy']}**ACC:** {mod['max_acc']}"
+
(f"\n{emojis['armor']}**ARMOR:** {mod['max_armor']}" if mod["max_armor"] > 0 else "")
)
embed.add_field(
name="\u200b",
value=
f"{emojis['rof']}**ROF:** {mod['max_rof']}\n"
f"{emojis['evasion']}**EVA:** {mod['max_eva']}\n"
f"{emojis['crit_rate']}**CRIT RATE:** {self.crit_rate}%"
+
(f"\n{emojis['clip_size']}**ROUNDS:** {mod['clip_size']}" if mod["clip_size"] > 0 else "")
)
tile = {
k: emojis["blue_square"] if v==1 else emojis["white_square"] if v==0 else emojis["black_square"]
for k, v in mod["tile"]["shape"].items()
}
embed.add_field(
name="Tile",
value=
f"\u200b {tile['7']}{tile['8']}{tile['9']}\u2001{shorten_types(mod['tile']['target'])}\n"
f"\u200b {tile['4']}{tile['5']}{tile['6']}\u2001{mod['tile']['effect'][0]}\n"
f"\u200b {tile['1']}{tile['2']}{tile['3']}\u2001{mod['tile']['effect'][1]}",
inline=False
)
embeds.append(embed)
cur = embeds[i]
skill = mod["skill"][skill_index]
icd = f"Initial CD: {skill['icd']}s" if skill["icd"] else None
cd = f"CD: {skill['cd']}s" if skill["cd"] else None
if cd or icd:
add = " (" + "/".join(filter(None, (icd, cd))) + ")"
else:
add = ""
cur.add_field(
name=f"Skill {skill_index+1}",
value=
f"**{skill['name']}**{add}\n"
f"{skill_effect}",
inline=False
)
return embeds
async def display_info(self, ctx):
emojis = ctx.cog.emojis
paging = utils.Paginator([])
base_info = self._base_info(ctx)
other_info = self._other_info(ctx)
skins = self.skins
analysis = {}
speq_info = await self.query_speq(ctx)
saved = {
"info": None,
"info_iter": None,
"skins": None,
"skin_iter": None,
"current_skin": (None, None)
}
def add_image():
index, skin = saved["current_skin"]
if index is None:
saved["embed"].set_image(url=config.NO_IMG)
saved["embed"].set_footer(text=discord.Embed.Empty)
else:
saved["embed"].set_footer(text=f"Skin: {skin['name']} ({skin['form']}) - ({index+1}/{len(saved['skins'])})")
saved["embed"].set_image(url=skin["image_url"])
def change_info_to(info, skins, state="original"):
if saved["info"] is not info:
saved["info"] = info
saved["info_iter"] = data_type.circle_iter(info)
if saved["skins"] is not skins:
saved["skins"] = skins
saved["skin_iter"] = data_type.circle_iter(skins, with_index=True)
try:
saved["current_skin"] = next(saved["skin_iter"])
except ValueError:
saved["current_skin"] = (None, None)
saved["embed"] = next(saved["info_iter"])
saved["state"] = state
add_image()
@paging.wrap_action(emojis["damage"])
def change_base_info():
change_info_to(base_info, skins)
return saved["embed"]
@paging.wrap_action("\U0001f5d2")
def change_other_info():
change_info_to(other_info, skins)
return saved["embed"]
if self.moddable:
mod_info = self._mod_info(ctx)
mod_skins = self.mod_data["skins"]
@paging.wrap_action(emojis["mem_frag"])
def change_mod3_info():
change_info_to(mod_info, mod_skins, "mod")
return saved["embed"]
if speq_info["equipments"]:
speq_iter = data_type.circle_iter(speq_info["equipments"])
@paging.wrap_action(emojis["exoskeleton"])
def change_speq_info():
return next(speq_iter)
digest_iter = data_type.circle_iter(speq_info["digest"])
@paging.wrap_action("\U0001f52c")
def change_digest_info():
return next(digest_iter)
@paging.wrap_action("\U0001f50e")
async def change_analysis_info():
if not analysis:
analysis.update(await self.query_gflanalysis(ctx))
if "mod" in analysis:
state = saved["state"]
else:
state = "original"
analysis_info = analysis[state]
analysis_skins = mod_skins if state == "mod" else skins
change_info_to(analysis_info, analysis_skins, state)
return saved["embed"]
@paging.wrap_action("\U0001f5bc")
def change_image():
saved["current_skin"] = next(saved["skin_iter"])
add_image()
return saved["embed"]
await paging.navigate(ctx)
async def query_gflanalysis(self, ctx):
bytes_ = await ctx.bot.fetch(
GFLANALYSIS_API,
params={
"action": "ask",
"query": f"[[Name::~*{self.en_name}*]]|?Name|?Pros|?Cons|?Status|?Roles|?Analysis",
"format": "json",
"redirects": 1
}
)
data = json.loads(bytes_)
results = data["query"]["results"]
ret = {}
if results:
for name, raw in results.items():
embeds = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
# | - Import Modules
import os
import re
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
# __|
class PDOS_Plotting():
"""
"""
# | - PDOS_Plotting ***********************************
def __init__(self,
data_file_dir=".",
# out_data_file_dir="out_data",
):
"""
"""
# | - __init__
# Input attributes
self.data_file_dir = data_file_dir
# Attributes set during
self.total_dos_df = None
self.pdos_df = None
self.band_gap_df = None
self.ispin = None
# System name
data_file_dir = self.data_file_dir
sys_name_i = data_file_dir.split("/")[-1]
self.sys_name = sys_name_i
self.out_data_file_dir = os.path.join("out_data", sys_name_i)
self.read_INCAR()
# self.create_out_data_dir()
self.read_data_files()
# __|
def create_out_data_dir(self):
"""
"""
# | - create_out_data_dir
out_folder = self.out_data_file_dir
if not os.path.exists(out_folder):
os.makedirs(out_folder)
# __|
def read_INCAR(self):
"""
"""
# | - read_INCAR
data_file_dir = self.data_file_dir
# Check spin polarized calculations:
try:
incar_file = open("INCAR", "r")
ispin = 1 # Non spin polarised calculations.
for line in incar_file:
if re.match("(.*)ISPIN(.*)2", line):
ispin = 2 # For spin polarised calculations.
except:
incar_file = open(data_file_dir + "/INCAR", "r")
ispin = 1 # Non spin polarised calculations.
for line in incar_file:
if re.match("(.*)ISPIN(.*)2", line):
ispin = 2 # For spin polarised calculations.
self.ispin = ispin
# __|
def read_data_files(self):
"""
"""
#| - read_data_files
data_file_dir = self.data_file_dir
# var_dir = "rapiDOS_out"
#| - Methods
def get_root_pdos_dir(var_dir, var_file):
"""
"""
#| - get_root_pdos_dir
root_pdos_dir = Path(
os.path.join(
data_file_dir,
var_dir,
var_file,
)
)
return(root_pdos_dir)
#__|
def read_data_file(filename):
"""
"""
#| - read_data_file
my_file_0 = Path(os.path.join(get_root_pdos_dir("", filename)))
my_file_1 = Path(os.path.join(get_root_pdos_dir("rapiDOS_out", filename)))
# print(my_file_1)
if my_file_0.is_file():
df = pd.read_csv(my_file_0)
elif my_file_1.is_file():
df = pd.read_csv(my_file_1)
else:
df = None
print("Woops! 8ewhrgw7yqewfi")
return(df)
#__|
# __|
total_dos_df = read_data_file("TotalDOS.csv")
pdos_df = read_data_file("PDOS.csv")
band_gap_df = read_data_file("BandGap.csv")
if total_dos_df is not None and \
pdos_df is not None and \
band_gap_df is not None:
# print("THIS ONE")
# band_gap = band_gap_df
band_gap_lower = band_gap_df['Lower Band Gap'][0]
band_gap_upper = band_gap_df['Upper Band Gap'][0]
# print('Approx. Band Gap:',np.round(np.abs(band_gap['Band Gap'][0]),3), "eV")
self.band_gap_lower = band_gap_lower
self.band_gap_upper = band_gap_upper
self.in_good_state = True
else:
self.in_good_state = False
print("else here")
self.total_dos_df = total_dos_df
self.pdos_df = pdos_df
self.band_gap_df = band_gap_df
# __|
def plot__total_dos(self):
"""
"""
# | - plot__total_dos
total_dos_df = self.total_dos_df
pdos_df = self.pdos_df
band_gap_df = self.band_gap_df
band_gap_lower = self.band_gap_lower
band_gap_upper = self.band_gap_upper
ispin = self.ispin
out_data_file_dir = self.out_data_file_dir
fig = plt.figure(figsize=(10.0,6.0)) # Create figure.
plt.axvline(x=[0.0], color='k', linestyle='--',linewidth=1.2) # Plot vertical line in Fermi.
# Plot DOS spin up.
plt.plot(
total_dos_df['Energy (E-Ef)'],
total_dos_df['Total DOS Spin Up'],
color='C3')
# Fill between spin up and down.
plt.fill_between(total_dos_df['Energy (E-Ef)'],
0, total_dos_df['Total DOS Spin Up'],
facecolor='C7', alpha=0.2, interpolate=True)
plt.axvspan(band_gap_lower, band_gap_upper, alpha=0.2, color='C5')
if ispin == 2:
# Plot DOS spin down
plt.plot(
total_dos_df['Energy (E-Ef)'],
-total_dos_df['Total DOS Spin Down'],
color='C4')
# Fill between spin up and down.
plt.fill_between(total_dos_df['Energy (E-Ef)'],
0, -total_dos_df['Total DOS Spin Up'],
facecolor='C7', alpha=0.2, interpolate=True)
plt.legend() # Add legend to the plot.
plt.xlabel('E - Ef (eV)') # x axis label.
plt.ylabel('DOS (states/eV)') # x axis label.
plt.xlim([-8.0,4.0]) # Plot limits.
# fig.savefig(out_folder + "/" + "Fig1.pdf") # Save figure EPS.
fig.savefig(out_data_file_dir + "/" + "Fig1.pdf") # Save figure EPS.
# __|
def plot__total_pz_dz2(self):
"""
"""
# | - plot__total_pz_dz2
total_dos_df = self.total_dos_df
pdos_df = self.pdos_df
band_gap_df = self.band_gap_df
band_gap_lower = self.band_gap_lower
band_gap_upper = self.band_gap_upper
ispin = self.ispin
out_data_file_dir = self.out_data_file_dir
fig = plt.figure(figsize=(10.0,6.0)) # Create figure.
pdos_energy_index_df = pdos_df.set_index(['Energy (E-Ef)']) # Set index.
# Sum same orbitals for all atoms:
sum_orbitals_df = pdos_energy_index_df.groupby(pdos_energy_index_df.index).sum()
plt.axvline(x=[0.0], color='k', linestyle='--',linewidth=1.2) # Plot vertical line in Fermi.
# Spin up.
plt.plot(sum_orbitals_df['pz_up'],color='C3')
plt.plot(sum_orbitals_df['dz2_up'],color='C8')
# Spin down.
if ispin == 2:
plt.plot(sum_orbitals_df['pz_down'],color='C3')
plt.plot(sum_orbitals_df['dz2_down'],color='C8')
plt.legend() # Add legend to the plot.
plt.xlabel('E - Ef (eV)') # x axis label.
plt.ylabel('DOS (states/eV)') # x axis label.
plt.xlim([-8.0,4.0]) # Plot limits.
fig.savefig(out_data_file_dir + "/" + "Fig2.pdf") # Save figure EPS.
# __|
def plot__spd(self):
"""
"""
# | - plot__spd
total_dos_df = self.total_dos_df
pdos_df = self.pdos_df
band_gap_df = self.band_gap_df
band_gap_lower = self.band_gap_lower
band_gap_upper = self.band_gap_upper
ispin = self.ispin
out_data_file_dir = self.out_data_file_dir
pdos_energy_index_df = pdos_df.set_index(['Energy (E-Ef)']) # Set index.
# Sum same orbitals for all atoms:
sum_orbitals_df = pdos_energy_index_df.groupby(pdos_energy_index_df.index).sum()
# Sum of orbitals for spin up:
sum_orbitals_df['Total p_up'] = sum_orbitals_df.apply(lambda row: row.px_up + row.py_up + row.pz_up, axis=1)
sum_orbitals_df['Total d_up'] = sum_orbitals_df.apply(lambda row: row.dxy_up + row.dyz_up + row.dxz_up + row.dz2_up + row.dx2_up, axis=1)
# Sum of orbitals for spin up:
if ispin == 2:
sum_orbitals_df['Total p_down'] = sum_orbitals_df.apply(lambda row: row.px_down + row.py_down + row.pz_down, axis=1)
sum_orbitals_df['Total d_down'] = sum_orbitals_df.apply(lambda row: row.dxy_down + row.dyz_down + row.dxz_down + row.dz2_down + row.dx2_down, axis=1)
# Plots:
fig = plt.figure(figsize=(10.0,6.0)) # Create figure.
plt.axvline(x=[0.0], color='k', linestyle='--',linewidth=1.2) # Plot vertical line in Fermi.
# Spin up:
plt.plot(sum_orbitals_df['s_up'],color='C1')
plt.plot(sum_orbitals_df['Total p_up'],color='C3')
plt.plot(sum_orbitals_df['Total d_up'],color='C8')
# Spin down:
if ispin == 2:
plt.plot(sum_orbitals_df['s_down'],color='C1')
plt.plot(sum_orbitals_df['Total p_down'],color='C3')
plt.plot(sum_orbitals_df['Total d_down'],color='C8')
plt.legend() # Add legend to the plot.
plt.xlabel('E - Ef (eV)') # x axis label.
plt.ylabel('DOS (states/eV)') # x axis label.
plt.xlim([-8.0,4.0]) # Plot limits.
fig.savefig(out_data_file_dir + "/" + "Fig3.pdf") # Save figure EPS.
# __|
def plot__atom(self, atom_selected=None):
"""
"""
#| - plot__atom
total_dos_df = self.total_dos_df
pdos_df = self.pdos_df
band_gap_df = self.band_gap_df
band_gap_lower = self.band_gap_lower
band_gap_upper = self.band_gap_upper
ispin = self.ispin
out_data_file_dir = self.out_data_file_dir
list_of_atoms = list(reversed(pdos_df['Atom Label'].unique()))
print('List of atoms: ', list_of_atoms)
""" Select one atom from the previous list. Remember list_of_atoms[0] corresponds to Atom #1,
list_of_atoms[1] to #2 ..."""
atom_selected = atom_selected or list_of_atoms[1] # honour the argument if given, else default to the second atom ('Cu2' in this example).
pdos_energy_index_df = pdos_df.set_index(['Energy (E-Ef)']) # Set index.
only_atom_df = pdos_energy_index_df[pdos_energy_index_df['Atom Label']==atom_selected] # Select only one atom (e.g Cu2).
atom_spin_up_df = only_atom_df.filter(regex="up").sum(axis=1) # Filter, get all bands with spin up. Then, sum all orbitals.
if ispin == 2:
atom_spin_down_df = only_atom_df.filter(regex="down").sum(axis=1) # Filter, get all bands with spin down. Then, sum all orbitals.
# Plots:
fig = plt.figure(figsize=(10.0,6.0)) # Create figure.
plt.plot(atom_spin_up_df,color='C1') # Atom spin up.
if ispin == 2:
plt.plot(atom_spin_down_df,color='C3') # Atom spin down.
plt.axvline(x=[0.0], color='k', linestyle='--',linewidth=1.2) # Plot vertical line in Fermi.
plt.legend(['Atom spin up']) # Add manually legend to the plot.
if ispin == 2: plt.legend(['Atom spin up','Atom spin down']) # Add manually legend to the plot.
plt.xlabel('E - Ef (eV)') # x axis label.
plt.ylabel('DOS (states/eV)') # x axis label.
plt.xlim([-8.0,4.0]) # Plot limits.
fig.savefig(out_data_file_dir + "/" + "Fig4__atom_states.pdf") # Save figure EPS.
# __|
# __| *************************************************
def calc_band_center(df):
"""
"""
#| - calc_band_center
df = df[
# (df.index > -10) & \
# (df.index < 4)
(df.index > -18) & \
(df.index < 1.5)
# (df.index > -10) & \
# (df.index < 2)
# (df.index > -8) & \
# (df.index < 2)
# (df.index > -6) & \
# (df.index < 2)
# (df.index > -4) & \
# (df.index < 2)
# (df.index > -2) & \
# (df.index < 2)
# #################################################
# (df.index > -10) & \
# (df.index < 0)
# (df.index > -10) & \
# (df.index < 2)
# (df.index > -10) & \
# (df.index < 4)
# (df.index > -10) & \
# (df.index < 6)
# (df.index > -10) & \
# (df.index < 8)
]
pho_i = df.values
eps = np.array(df.index.tolist())
band_center_up = np.trapz(pho_i * eps, x=eps) / np.trapz(pho_i, x=eps)
return(band_center_up)
#__|
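# A rough reading of calc_band_center: with rho(eps) the DOS values restricted to the energy
# window above, the band center is the first moment
#     eps_center = trapz(rho * eps, eps) / trapz(rho, eps)
# so a DOS peak symmetric about, say, -2 eV gives a band center of roughly -2 eV.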
def process_PDOS(
PDOS_i=None,
# atom_name_list=None,
):
"""
"""
#| - process_PDOS
data_dict_i = dict()
# #####################################################
orbitals_to_plot = [
"px_up", "py_up", "pz_up",
"px_down", "py_down", "pz_down",
]
df_xy = None
df_band_centers = None
was_processed = False
if PDOS_i.in_good_state:
was_processed = True
pdos_df = PDOS_i.pdos_df
pdos_df = pdos_df.set_index(["Energy (E-Ef)"])
cols_to_keep = orbitals_to_plot + ["Atom Label"]
df_i = pdos_df[cols_to_keep]
data = []
data_xy_dict = dict()
grouped = df_i.groupby(["Atom Label"])
for name, group in grouped:
data_dict_i = dict()
data_dict_i["name"] = name
spin_up_cols = [i for i in group.columns.tolist() if "up" in i]
spin_down_cols = [i for i in group.columns.tolist() if "down" in i]
px_sum = abs(group.px_up) + abs(group.px_down)
px_band_center = calc_band_center(px_sum)
py_sum = abs(group.py_up) + abs(group.py_down)
py_band_center = calc_band_center(py_sum)
pz_sum = abs(group.pz_up) + abs(group.pz_down)
pz_band_center = calc_band_center(pz_sum)
# #################################################
p_tot_sum = px_sum + py_sum + pz_sum
p_tot_band_center = calc_band_center(p_tot_sum)
data_dict_i["px_band_center"] = px_band_center
data_dict_i["py_band_center"] = py_band_center
data_dict_i["pz_band_center"] = pz_band_center
data_dict_i["p_tot_band_center"] = p_tot_band_center
data.append(data_dict_i)
df_xy = pd.DataFrame()
df_xy["px_sum"] = px_sum
| |
'\U0001f50e',
'lock_with_ink_pen': '\U0001f50f',
'closed_lock_with_key': '\U0001f510',
'lock': '\U0001f512',
'unlock': '\U0001f513',
'heart': '\U00002764\U0000fe0f',
'orange_heart': '\U0001f9e1',
'money_with_wings': '\U0001f4b8',
'dollar': '\U0001f4b5',
'yen': '\U0001f4b4',
'euro': '\U0001f4b6',
'pound': '\U0001f4b7',
'coin': '\U0001fa99',
'moneybag': '\U0001f4b0',
'credit_card': '\U0001f4b3',
'gem': '\U0001f48e',
'scales': '\U00002696\U0000fe0f',
'ladder': '\U0001fa9c',
'toolbox': '\U0001f9f0',
'screwdriver': '\U0001fa9b',
'wrench': '\U0001f527',
'hammer': '\U0001f528',
'hammer_pick': '\U00002692\U0000fe0f',
'hammer_and_pick': '\U00002692\U0000fe0f',
'tools': '\U0001f6e0\U0000fe0f',
'hammer_and_wrench': '\U0001f6e0\U0000fe0f',
'pick': '\U000026cf\U0000fe0f',
'nut_and_bolt': '\U0001f529',
'bricks': '\U0001f9f1',
'chains': '\U000026d3\U0000fe0f',
'hook': '\U0001fa9d',
'knot': '\U0001faa2',
'magnet': '\U0001f9f2',
'gun': '\U0001f52b',
'bomb': '\U0001f4a3',
'firecracker': '\U0001f9e8',
'axe': '\U0001fa93',
'carpentry_saw': '\U0001fa9a',
'knife': '\U0001f52a',
'dagger': '\U0001f5e1\U0000fe0f',
'dagger_knife': '\U0001f5e1\U0000fe0f',
'crossed_swords': '\U00002694\U0000fe0f',
'shield': '\U0001f6e1\U0000fe0f',
'smoking': '\U0001f6ac',
'coffin': '\U000026b0\U0000fe0f',
'headstone': '\U0001faa6',
'urn': '\U000026b1\U0000fe0f',
'funeral_urn': '\U000026b1\U0000fe0f',
'amphora': '\U0001f3fa',
'magic_wand': '\U0001fa84',
'crystal_ball': '\U0001f52e',
'prayer_beads': '\U0001f4ff',
'nazar_amulet': '\U0001f9ff',
'barber': '\U0001f488',
'alembic': '\U00002697\U0000fe0f',
'telescope': '\U0001f52d',
'microscope': '\U0001f52c',
'hole': '\U0001f573\U0000fe0f',
'window': '\U0001fa9f',
'adhesive_bandage': '\U0001fa79',
'stethoscope': '\U0001fa7a',
'pill': '\U0001f48a',
'syringe': '\U0001f489',
'drop_of_blood': '\U0001fa78',
'dna': '\U0001f9ec',
'microbe': '\U0001f9a0',
'petri_dish': '\U0001f9eb',
'test_tube': '\U0001f9ea',
'thermometer': '\U0001f321\U0000fe0f',
'mouse_trap': '\U0001faa4',
'broom': '\U0001f9f9',
'basket': '\U0001f9fa',
'sewing_needle': '\U0001faa1',
'roll_of_paper': '\U0001f9fb',
'toilet': '\U0001f6bd',
'plunger': '\U0001faa0',
'bucket': '\U0001faa3',
'potable_water': '\U0001f6b0',
'shower': '\U0001f6bf',
'bathtub': '\U0001f6c1',
'bath': '\U0001f6c0',
'toothbrush': '\U0001faa5',
'soap': '\U0001f9fc',
'razor': '\U0001fa92',
'sponge': '\U0001f9fd',
'squeeze_bottle': '\U0001f9f4',
'bellhop': '\U0001f6ce\U0000fe0f',
'bellhop_bell': '\U0001f6ce\U0000fe0f',
'key': '\U0001f511',
'door': '\U0001f6aa',
'chair': '\U0001fa91',
'mirror': '\U0001fa9e',
'couch': '\U0001f6cb\U0000fe0f',
'couch_and_lamp': '\U0001f6cb\U0000fe0f',
'bed': '\U0001f6cf\U0000fe0f',
'sleeping_accommodation': '\U0001f6cc',
'teddy_bear': '\U0001f9f8',
'frame_photo': '\U0001f5bc\U0000fe0f',
'frame_with_picture': '\U0001f5bc\U0000fe0f',
'shopping_bags': '\U0001f6cd\U0000fe0f',
'shopping_cart': '\U0001f6d2',
'shopping_trolley': '\U0001f6d2',
'gift': '\U0001f381',
'balloon': '\U0001f388',
'flags': '\U0001f38f',
'ribbon': '\U0001f380',
'confetti_ball': '\U0001f38a',
'tada': '\U0001f389',
'piñata': '\U0001fa85',
'nesting_dolls': '\U0001fa86',
'dolls': '\U0001f38e',
'izakaya_lantern': '\U0001f3ee',
'construction_site': '\U0001f3d7\U0000fe0f',
'building_construction': '\U0001f3d7\U0000fe0f',
'factory': '\U0001f3ed',
'office': '\U0001f3e2',
'department_store': '\U0001f3ec',
'post_office': '\U0001f3e3',
'european_post_office': '\U0001f3e4',
'hospital': '\U0001f3e5',
'bank': '\U0001f3e6',
'hotel': '\U0001f3e8',
'convenience_store': '\U0001f3ea',
'school': '\U0001f3eb',
'love_hotel': '\U0001f3e9',
'wedding': '\U0001f492',
'classical_building': '\U0001f3db\U0000fe0f',
'church': '\U000026ea',
'mosque': '\U0001f54c',
'synagogue': '\U0001f54d',
'hindu_temple': '\U0001f6d5',
'kaaba': '\U0001f54b',
'shinto_shrine': '\U000026e9\U0000fe0f',
'railway_track': '\U0001f6e4\U0000fe0f',
'railroad_track': '\U0001f6e4\U0000fe0f',
'motorway': '\U0001f6e3\U0000fe0f',
'japan': '\U0001f5fe',
'rice_scene': '\U0001f391',
'park': '\U0001f3de\U0000fe0f',
'national_park': '\U0001f3de\U0000fe0f',
'sunrise': '\U0001f305',
'sunrise_over_mountains': '\U0001f304',
'stars': '\U0001f320',
'sparkler': '\U0001f387',
'fireworks': '\U0001f386',
'city_sunset': '\U0001f307',
'city_sunrise': '\U0001f307',
'city_dusk': '\U0001f306',
'cityscape': '\U0001f3d9\U0000fe0f',
'night_with_stars': '\U0001f303',
'milky_way': '\U0001f30c',
'bridge_at_night': '\U0001f309',
'foggy': '\U0001f301',
'watch': '\U0000231a',
'mobile_phone': '\U0001f4f1',
'iphone': '\U0001f4f1',
'calling': '\U0001f4f2',
'computer': '\U0001f4bb',
'keyboard': '\U00002328\U0000fe0f',
'desktop': '\U0001f5a5\U0000fe0f',
'desktop_computer': '\U0001f5a5\U0000fe0f',
'printer': '\U0001f5a8\U0000fe0f',
'mouse_three_button': '\U0001f5b1\U0000fe0f',
'three_button_mouse': '\U0001f5b1\U0000fe0f',
'trackball': '\U0001f5b2\U0000fe0f',
'joystick': '\U0001f579\U0000fe0f',
'compression': '\U0001f5dc\U0000fe0f',
'minidisc': '\U0001f4bd',
'floppy_disk': '\U0001f4be',
'cd': '\U0001f4bf',
'dvd': '\U0001f4c0',
'vhs': '\U0001f4fc',
'camera': '\U0001f4f7',
'camera_with_flash': '\U0001f4f8',
'video_camera': '\U0001f4f9',
'movie_camera': '\U0001f3a5',
'projector': '\U0001f4fd\U0000fe0f',
'film_projector': '\U0001f4fd\U0000fe0f',
'film_frames': '\U0001f39e\U0000fe0f',
'telephone_receiver': '\U0001f4de',
'telephone': '\U0000260e\U0000fe0f',
'pager': '\U0001f4df',
'fax': '\U0001f4e0',
'tv': '\U0001f4fa',
'radio': '\U0001f4fb',
'microphone': '\U0001f3a4',
'level_slider': '\U0001f39a\U0000fe0f',
'control_knobs': '\U0001f39b\U0000fe0f',
'compass': '\U0001f9ed',
'stopwatch': '\U000023f1\U0000fe0f',
'timer': '\U000023f2\U0000fe0f',
'timer_clock': '\U000023f2\U0000fe0f',
'alarm_clock': '\U000023f0',
'hourglass': '\U0000231b',
'hourglass_flowing_sand': '\U000023f3',
'satellite': '\U0001f4e1',
'battery': '\U0001f50b',
'electric_plug': '\U0001f50c',
'bulb': '\U0001f4a1',
'flashlight': '\U0001f526',
'candle': '\U0001f56f\U0000fe0f',
'diya_lamp': '\U0001fa94',
'fire_extinguisher': '\U0001f9ef',
'oil': '\U0001f6e2\U0000fe0f',
'oil_drum': '\U0001f6e2\U0000fe0f',
'manual_wheelchair': '\U0001f9bd',
'motorized_wheelchair': '\U0001f9bc',
'scooter': '\U0001f6f4',
'bike': '\U0001f6b2',
'motor_scooter': '\U0001f6f5',
'motorbike': '\U0001f6f5',
'motorcycle': '\U0001f3cd\U0000fe0f',
'racing_motorcycle': '\U0001f3cd\U0000fe0f',
'auto_rickshaw': '\U0001f6fa',
'rotating_light': '\U0001f6a8',
'oncoming_police_car': '\U0001f694',
'oncoming_bus': '\U0001f68d',
'oncoming_automobile': '\U0001f698',
'oncoming_taxi': '\U0001f696',
'aerial_tramway': '\U0001f6a1',
'mountain_cableway': '\U0001f6a0',
'suspension_railway': '\U0001f69f',
'railway_car': '\U0001f683',
'train2': '\U0001f686',
'mountain_railway': '\U0001f69e',
'monorail': '\U0001f69d',
'bullettrain_side': '\U0001f684',
'bullettrain_front': '\U0001f685',
'light_rail': '\U0001f688',
'steam_locomotive': '\U0001f682',
'metro': '\U0001f687',
'tram': '\U0001f68a',
'station': '\U0001f689',
'airplane': '\U00002708\U0000fe0f',
'airplane_departure': '\U0001f6eb',
'airplane_arriving': '\U0001f6ec',
'airplane_small': '\U0001f6e9\U0000fe0f',
'small_airplane': '\U0001f6e9\U0000fe0f',
'seat': '\U0001f4ba',
'satellite_orbital': '\U0001f6f0\U0000fe0f',
'rocket': '\U0001f680',
'flying_saucer': '\U0001f6f8',
'helicopter': '\U0001f681',
'canoe': '\U0001f6f6',
'kayak': '\U0001f6f6',
'sailboat': '\U000026f5',
'speedboat': '\U0001f6a4',
'motorboat': '\U0001f6e5\U0000fe0f',
'cruise_ship': '\U0001f6f3\U0000fe0f',
'passenger_ship': '\U0001f6f3\U0000fe0f',
'ferry': '\U000026f4\U0000fe0f',
'ship': '\U0001f6a2',
'anchor': '\U00002693',
'fuelpump': '\U000026fd',
'construction': '\U0001f6a7',
'vertical_traffic_light': '\U0001f6a6',
'traffic_light': '\U0001f6a5',
'busstop': '\U0001f68f',
'map': '\U0001f5fa\U0000fe0f',
'world_map': '\U0001f5fa\U0000fe0f',
'moyai': '\U0001f5ff',
'statue_of_liberty': '\U0001f5fd',
'tokyo_tower': '\U0001f5fc',
'european_castle': '\U0001f3f0',
'japanese_castle': '\U0001f3ef',
'stadium': '\U0001f3df\U0000fe0f',
'ferris_wheel': '\U0001f3a1',
'roller_coaster': '\U0001f3a2',
'carousel_horse': '\U0001f3a0',
'fountain': '\U000026f2',
'beach_umbrella': '\U000026f1\U0000fe0f',
'umbrella_on_ground': '\U000026f1\U0000fe0f',
'beach': '\U0001f3d6\U0000fe0f',
'beach_with_umbrella': '\U0001f3d6\U0000fe0f',
'island': '\U0001f3dd\U0000fe0f',
'desert_island': '\U0001f3dd\U0000fe0f',
'desert': '\U0001f3dc\U0000fe0f',
'volcano': '\U0001f30b',
'mountain': '\U000026f0\U0000fe0f',
'mountain_snow': '\U0001f3d4\U0000fe0f',
'snow_capped_mountain': '\U0001f3d4\U0000fe0f',
'mount_fuji': '\U0001f5fb',
'camping': '\U0001f3d5\U0000fe0f',
'tent': '\U000026fa',
'house': '\U0001f3e0',
'house_with_garden': '\U0001f3e1',
'homes': '\U0001f3d8\U0000fe0f',
'house_buildings': '\U0001f3d8\U0000fe0f',
'house_abandoned': '\U0001f3da\U0000fe0f',
'derelict_house_building': '\U0001f3da\U0000fe0f',
'hut': '\U0001f6d6',
'person_surfing': '\U0001f3c4',
'surfer': '\U0001f3c4',
'woman_surfing': '\U0001f3c4\U0000200d\U00002640\U0000fe0f',
'man_surfing': '\U0001f3c4\U0000200d\U00002642\U0000fe0f',
'person_swimming': '\U0001f3ca',
'swimmer': '\U0001f3ca',
'woman_swimming': '\U0001f3ca\U0000200d\U00002640\U0000fe0f',
'man_swimming': '\U0001f3ca\U0000200d\U00002642\U0000fe0f',
'person_playing_water_polo': '\U0001f93d',
'water_polo': '\U0001f93d',
'woman_playing_water_polo': '\U0001f93d\U0000200d\U00002640\U0000fe0f',
'man_playing_water_polo': '\U0001f93d\U0000200d\U00002642\U0000fe0f',
'person_rowing_boat': '\U0001f6a3',
'rowboat': '\U0001f6a3',
'woman_rowing_boat': '\U0001f6a3\U0000200d\U00002640\U0000fe0f',
'man_rowing_boat': '\U0001f6a3\U0000200d\U00002642\U0000fe0f',
'person_climbing': '\U0001f9d7',
'woman_climbing': '\U0001f9d7\U0000200d\U00002640\U0000fe0f',
'man_climbing': '\U0001f9d7\U0000200d\U00002642\U0000fe0f',
'person_mountain_biking': '\U0001f6b5',
'mountain_bicyclist': '\U0001f6b5',
'woman_mountain_biking': '\U0001f6b5\U0000200d\U00002640\U0000fe0f',
'man_mountain_biking': '\U0001f6b5\U0000200d\U00002642\U0000fe0f',
'person_biking': '\U0001f6b4',
'bicyclist': '\U0001f6b4',
'woman_biking': '\U0001f6b4\U0000200d\U00002640\U0000fe0f',
'man_biking': '\U0001f6b4\U0000200d\U00002642\U0000fe0f',
'trophy': '\U0001f3c6',
'first_place': '\U0001f947',
'first_place_medal': '\U0001f947',
'second_place': '\U0001f948',
'second_place_medal': '\U0001f948',
'third_place': '\U0001f949',
'third_place_medal': '\U0001f949',
'medal': '\U0001f3c5',
'sports_medal': '\U0001f3c5',
'military_medal': '\U0001f396\U0000fe0f',
'rosette': '\U0001f3f5\U0000fe0f',
'reminder_ribbon': '\U0001f397\U0000fe0f',
'ticket': '\U0001f3ab',
'tickets': '\U0001f39f\U0000fe0f',
'admission_tickets': '\U0001f39f\U0000fe0f',
'circus_tent': '\U0001f3aa',
'person_juggling': '\U0001f939',
'juggling': '\U0001f939',
'juggler': '\U0001f939',
'woman_juggling': '\U0001f939\U0000200d\U00002640\U0000fe0f',
'man_juggling': '\U0001f939\U0000200d\U00002642\U0000fe0f',
'performing_arts': '\U0001f3ad',
'ballet_shoes': '\U0001fa70',
'art': '\U0001f3a8',
'clapper': '\U0001f3ac',
'headphones': '\U0001f3a7',
'musical_score': '\U0001f3bc',
'musical_keyboard': '\U0001f3b9',
'drum': '\U0001f941',
'drum_with_drumsticks': '\U0001f941',
'long_drum': '\U0001fa98',
'saxophone': '\U0001f3b7',
'trumpet': '\U0001f3ba',
'guitar': '\U0001f3b8',
'banjo': '\U0001fa95',
'violin': '\U0001f3bb',
'accordion': '\U0001fa97',
'game_die': '\U0001f3b2',
'chess_pawn': '\U0000265f\U0000fe0f',
'dart': '\U0001f3af',
'bowling': '\U0001f3b3',
'video_game': '\U0001f3ae',
'slot_machine': '\U0001f3b0',
'jigsaw': '\U0001f9e9',
'red_car': '\U0001f697',
'taxi': '\U0001f695',
'blue_car': '\U0001f699',
'pickup_truck': '\U0001f6fb',
'bus': '\U0001f68c',
'trolleybus': '\U0001f68e',
'race_car': '\U0001f3ce\U0000fe0f',
'racing_car': '\U0001f3ce\U0000fe0f',
'police_car': '\U0001f693',
'ambulance': '\U0001f691',
'fire_engine': '\U0001f692',
'minibus': '\U0001f690',
'truck': '\U0001f69a',
'articulated_lorry': '\U0001f69b',
'tractor': '\U0001f69c',
'probing_cane': '\U0001f9af',
'fork_and_knife': '\U0001f374',
'fork_knife_plate': '\U0001f37d\U0000fe0f',
'fork_and_knife_with_plate': '\U0001f37d\U0000fe0f',
'bowl_with_spoon': '\U0001f963',
'takeout_box': '\U0001f961',
'chopsticks': '\U0001f962',
'salt': '\U0001f9c2',
'soccer': '\U000026bd',
'basketball': '\U0001f3c0',
'football': '\U0001f3c8',
'baseball': '\U000026be',
'softball': '\U0001f94e',
'tennis': '\U0001f3be',
'volleyball': '\U0001f3d0',
'rugby_football': '\U0001f3c9',
'flying_disc': '\U0001f94f',
'boomerang': '\U0001fa83',
'ball': '\U0001f3b1',
'yo_yo': '\U0001fa80',
'ping_pong': '\U0001f3d3',
'table_tennis': '\U0001f3d3',
'badminton': '\U0001f3f8',
'hockey': '\U0001f3d2',
'field_hockey': '\U0001f3d1',
'lacrosse': '\U0001f94d',
'cricket_game': '\U0001f3cf',
'cricket_bat_ball': '\U0001f3cf',
'goal': '\U0001f945',
'goal_net': '\U0001f945',
'golf': '\U000026f3',
'kite': '\U0001fa81',
'bow_and_arrow': '\U0001f3f9',
'archery': '\U0001f3f9',
'fishing_pole_and_fish': '\U0001f3a3',
'diving_mask': '\U0001f93f',
'boxing_glove': '\U0001f94a',
'boxing_gloves': '\U0001f94a',
'martial_arts_uniform': '\U0001f94b',
'karate_uniform': '\U0001f94b',
'running_shirt_with_sash': '\U0001f3bd',
'skateboard': '\U0001f6f9',
'roller_skate': '\U0001f6fc',
'sled': '\U0001f6f7',
'ice_skate': '\U000026f8\U0000fe0f',
'curling_stone': '\U0001f94c',
'ski': '\U0001f3bf',
'skier': '\U000026f7\U0000fe0f',
'snowboarder': '\U0001f3c2',
'parachute': '\U0001fa82',
'person_lifting_weights': '\U0001f3cb\U0000fe0f',
'lifter': '\U0001f3cb\U0000fe0f',
'weight_lifter': '\U0001f3cb\U0000fe0f',
'woman_lifting_weights': '\U0001f3cb\U0000fe0f\U0000200d\U00002640\U0000fe0f',
'man_lifting_weights': '\U0001f3cb\U0000fe0f\U0000200d\U00002642\U0000fe0f',
'people_wrestling': '\U0001f93c',
'wrestlers': '\U0001f93c',
'wrestling': '\U0001f93c',
'women_wrestling': '\U0001f93c\U0000200d\U00002640\U0000fe0f',
'men_wrestling': '\U0001f93c\U0000200d\U00002642\U0000fe0f',
'person_doing_cartwheel': '\U0001f938',
'cartwheel': '\U0001f938',
'woman_cartwheeling': '\U0001f938\U0000200d\U00002640\U0000fe0f',
'man_cartwheeling': '\U0001f938\U0000200d\U00002642\U0000fe0f',
'person_bouncing_ball': '\U000026f9\U0000fe0f',
'basketball_player': '\U000026f9\U0000fe0f',
'person_with_ball': '\U000026f9\U0000fe0f',
'woman_bouncing_ball': '\U000026f9\U0000fe0f\U0000200d\U00002640\U0000fe0f',
'man_bouncing_ball': '\U000026f9\U0000fe0f\U0000200d\U00002642\U0000fe0f',
'person_fencing': '\U0001f93a',
'fencer': '\U0001f93a',
'fencing': '\U0001f93a',
'person_playing_handball': '\U0001f93e',
'handball': '\U0001f93e',
'woman_playing_handball': '\U0001f93e\U0000200d\U00002640\U0000fe0f',
'man_playing_handball': '\U0001f93e\U0000200d\U00002642\U0000fe0f',
'person_golfing': '\U0001f3cc\U0000fe0f',
'golfer': '\U0001f3cc\U0000fe0f',
'woman_golfing': '\U0001f3cc\U0000fe0f\U0000200d\U00002640\U0000fe0f',
'man_golfing': '\U0001f3cc\U0000fe0f\U0000200d\U00002642\U0000fe0f',
'horse_racing': '\U0001f3c7',
'person_in_lotus_position': '\U0001f9d8',
'woman_in_lotus_position': '\U0001f9d8\U0000200d\U00002640\U0000fe0f',
'man_in_lotus_position': '\U0001f9d8\U0000200d\U00002642\U0000fe0f',
'avocado': '\U0001f951',
'olive': '\U0001fad2',
'broccoli': '\U0001f966',
'leafy_green': '\U0001f96c',
'bell_pepper': '\U0001fad1',
'cucumber': '\U0001f952',
'hot_pepper': '\U0001f336\U0000fe0f',
'corn': '\U0001f33d',
'carrot': '\U0001f955',
'garlic': '\U0001f9c4',
'onion': '\U0001f9c5',
'potato': '\U0001f954',
'sweet_potato': '\U0001f360',
'croissant': '\U0001f950',
'bagel': '\U0001f96f',
'bread': '\U0001f35e',
'french_bread': '\U0001f956',
'baguette_bread': '\U0001f956',
'flatbread': '\U0001fad3',
'pretzel': '\U0001f968',
'cheese': '\U0001f9c0',
'cheese_wedge': '\U0001f9c0',
'egg': '\U0001f95a',
'cooking': '\U0001f373',
'butter': '\U0001f9c8',
'pancakes': '\U0001f95e',
'waffle': '\U0001f9c7',
'bacon': '\U0001f953',
'cut_of_meat': '\U0001f969',
'poultry_leg': '\U0001f357',
'meat_on_bone': '\U0001f356',
'hotdog': '\U0001f32d',
'hot_dog': '\U0001f32d',
'hamburger': '\U0001f354',
'fries': '\U0001f35f',
'pizza': '\U0001f355',
'sandwich': '\U0001f96a',
'stuffed_flatbread': '\U0001f959',
'stuffed_pita': '\U0001f959',
'falafel': '\U0001f9c6',
'taco': '\U0001f32e',
'burrito': '\U0001f32f',
'tamale': '\U0001fad4',
'salad': '\U0001f957',
'green_salad': '\U0001f957',
'shallow_pan_of_food': '\U0001f958',
'paella': '\U0001f958',
'fondue': '\U0001fad5',
'canned_food': '\U0001f96b',
'spaghetti': '\U0001f35d',
'ramen': '\U0001f35c',
'stew': '\U0001f372',
'curry': '\U0001f35b',
'sushi': '\U0001f363',
'bento': '\U0001f371',
'dumpling': '\U0001f95f',
'oyster': '\U0001f9aa',
'fried_shrimp': '\U0001f364',
'rice_ball': '\U0001f359',
'rice': '\U0001f35a',
'rice_cracker': '\U0001f358',
'fish_cake': '\U0001f365',
'fortune_cookie': '\U0001f960',
'moon_cake': '\U0001f96e',
'oden': '\U0001f362',
'dango': '\U0001f361',
'shaved_ice': '\U0001f367',
'ice_cream': '\U0001f368',
'icecream': '\U0001f366',
'pie': '\U0001f967',
'cupcake': '\U0001f9c1',
'cake': '\U0001f370',
'birthday': '\U0001f382',
'custard': '\U0001f36e',
'pudding': '\U0001f36e',
'flan': '\U0001f36e',
'lollipop': '\U0001f36d',
'candy': '\U0001f36c',
'chocolate_bar': '\U0001f36b',
'popcorn': '\U0001f37f',
'doughnut': '\U0001f369',
'cookie': '\U0001f36a',
'chestnut': '\U0001f330',
'peanuts': '\U0001f95c',
'shelled_peanut': '\U0001f95c',
'honey_pot': '\U0001f36f',
'milk': '\U0001f95b',
'glass_of_milk': '\U0001f95b',
'baby_bottle': '\U0001f37c',
'coffee': '\U00002615',
'teapot': '\U0001fad6',
'mate': '\U0001f9c9',
'bubble_tea': '\U0001f9cb',
'beverage_box': '\U0001f9c3',
'cup_with_straw': '\U0001f964',
'sake': '\U0001f376',
'beer': '\U0001f37a',
'beers': '\U0001f37b',
'champagne_glass': '\U0001f942',
'clinking_glass': '\U0001f942',
'wine_glass': '\U0001f377',
'tumbler_glass': '\U0001f943',
'whisky': '\U0001f943',
'cocktail': '\U0001f378',
'tropical_drink': '\U0001f379',
'champagne': '\U0001f37e',
'bottle_with_popping_cork': '\U0001f37e',
'ice_cube': '\U0001f9ca',
'spoon': '\U0001f944',
'ear_of_rice': '\U0001f33e',
'potted_plant': '\U0001fab4',
'bouquet': '\U0001f490',
'tulip': '\U0001f337',
'rose': '\U0001f339',
'wilted_rose': '\U0001f940',
'wilted_flower': '\U0001f940',
'hibiscus': '\U0001f33a',
'cherry_blossom': '\U0001f338',
'blossom': '\U0001f33c',
'sunflower': '\U0001f33b',
'sun_with_face': '\U0001f31e',
'full_moon_with_face': '\U0001f31d',
'first_quarter_moon_with_face': '\U0001f31b',
'last_quarter_moon_with_face': '\U0001f31c',
'new_moon_with_face': '\U0001f31a',
'full_moon': '\U0001f315',
'waning_gibbous_moon': '\U0001f316',
'last_quarter_moon': '\U0001f317',
'waning_crescent_moon': '\U0001f318',
'new_moon': '\U0001f311',
'waxing_crescent_moon': '\U0001f312',
'first_quarter_moon': '\U0001f313',
'waxing_gibbous_moon': '\U0001f314',
'crescent_moon': '\U0001f319',
'earth_americas': '\U0001f30e',
'earth_africa': '\U0001f30d',
'earth_asia': '\U0001f30f',
'ringed_planet': '\U0001fa90',
'dizzy': '\U0001f4ab',
'star': '\U00002b50',
'sparkles': '\U00002728',
'zap': '\U000026a1',
'comet': '\U00002604\U0000fe0f',
'boom': '\U0001f4a5',
'fire': '\U0001f525',
'flame': '\U0001f525',
'cloud_tornado': '\U0001f32a\U0000fe0f',
'cloud_with_tornado': '\U0001f32a\U0000fe0f',
'rainbow': '\U0001f308',
'sunny': '\U00002600\U0000fe0f',
'white_sun_small_cloud': '\U0001f324\U0000fe0f',
'white_sun_with_small_cloud': '\U0001f324\U0000fe0f',
'partly_sunny': '\U000026c5',
'white_sun_cloud': '\U0001f325\U0000fe0f',
'white_sun_behind_cloud': '\U0001f325\U0000fe0f',
'cloud': '\U00002601\U0000fe0f',
'white_sun_rain_cloud': '\U0001f326\U0000fe0f',
'white_sun_behind_cloud_with_rain': '\U0001f326\U0000fe0f',
'cloud_rain': '\U0001f327\U0000fe0f',
'cloud_with_rain': '\U0001f327\U0000fe0f',
'thunder_cloud_rain': '\U000026c8\U0000fe0f',
'thunder_cloud_and_rain': '\U000026c8\U0000fe0f',
'cloud_lightning': '\U0001f329\U0000fe0f',
'cloud_with_lightning': '\U0001f329\U0000fe0f',
'cloud_snow': '\U0001f328\U0000fe0f',
'cloud_with_snow': '\U0001f328\U0000fe0f',
'snowflake': '\U00002744\U0000fe0f',
'snowman': '\U000026c4',
'wind_blowing_face': '\U0001f32c\U0000fe0f',
'dash': '\U0001f4a8',
'droplet': '\U0001f4a7',
'sweat_drops': '\U0001f4a6',
'umbrella2': '\U00002602\U0000fe0f',
'ocean': '\U0001f30a',
'fog': '\U0001f32b\U0000fe0f',
'green_apple': '\U0001f34f',
'apple': '\U0001f34e',
'pear': '\U0001f350',
'tangerine': '\U0001f34a',
'lemon': '\U0001f34b',
T_{f,in}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (nOutlets, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
# Coefficient matrices from continuity condition:
# [b_u]*[T_{f,u}](z=0) = [b_d]*[T_{f,d}](z=0) + [b_b]*[T_b]
a_in, a_out, a_b = self._continuity_condition(
m_flow_borehole, cp_f, nSegments)
return a_in, a_out, a_b
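# --- Illustrative sketch (not part of the original class) --------------------
# The continuity condition referenced above has the generic linear form
#     [b_u] * T_u(z=0) = [b_d] * T_d(z=0) + [b_b] * T_b,
# so once the coefficient matrices are known, the upward (outlet-side) fluid
# temperature follows from a single linear solve. The toy matrices below are
# made-up numbers chosen only to show the algebra; they are not physical.
def _demo_continuity_solve():
    import numpy as np
    b_u = np.array([[1.0]])          # (nOutlets, nOutlets)
    b_d = np.array([[0.9]])          # (nOutlets, nInlets)
    b_b = np.array([[0.02, 0.03]])   # (nOutlets, nSegments), nSegments = 2
    T_f_in = np.array([30.0])        # inlet fluid temperature (degC)
    T_b = np.array([12.0, 11.0])     # borehole wall temperatures (degC)
    T_f_up = np.linalg.solve(b_u, b_d @ T_f_in + b_b @ T_b)
    return T_f_up                    # upward fluid temperature at z = 0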
def _continuity_condition_head(self, m_flow_borehole, cp, nSegments):
"""
Build coefficient matrices to evaluate fluid temperatures at depth
(z = 0). These coefficients take into account connections between
U-tube pipes.
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z=0) = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{out}} \\mathbf{T_{f,out}}
+ \\mathbf{a_{b}} \\mathbf{T_{b}}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (2*nPipes, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (2*nPipes, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (2*nPipes, nSegments,) array
Array of coefficients for borehole wall temperature.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp, nSegments)
a_in = np.eye(2*self.nPipes, M=self.nPipes, k=0)
a_out = np.eye(2*self.nPipes, M=self.nPipes, k=-self.nPipes)
a_b = np.zeros((2*self.nPipes, nSegments))
return a_in, a_out, a_b
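# --- Illustrative sketch (not part of the original class) --------------------
# Shows the block structure produced by the np.eye calls above for a single
# U-tube (nPipes = 1, nInlets = nOutlets = 1, nSegments = 3): the downward pipe
# picks up the inlet temperature, the upward pipe picks up the outlet
# temperature, and the borehole-wall block is all zeros at the pipe head.
def _demo_head_condition_shapes(nPipes=1, nSegments=3):
    import numpy as np
    a_in = np.eye(2 * nPipes, M=nPipes, k=0)
    a_out = np.eye(2 * nPipes, M=nPipes, k=-nPipes)
    a_b = np.zeros((2 * nPipes, nSegments))
    # a_in  -> [[1.], [0.]] : row 0 (downward pipe) takes T_f,in
    # a_out -> [[0.], [1.]] : row 1 (upward pipe) takes T_f,out
    return a_in, a_out, a_b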
def _format_inputs(self, m_flow_borehole, cp_f, nSegments):
"""
Format mass flow rate and heat capacity inputs.
"""
# Format mass flow rate inputs
# Mass flow rate in each fluid circuit
m_flow_in = np.atleast_1d(m_flow_borehole)
if not len(m_flow_in) == self.nInlets:
raise ValueError(
'Incorrect length of mass flow vector.')
self._m_flow_in = m_flow_in
# Mass flow rate in pipes
m_flow_pipe = np.tile(m_flow_in, 2)
self._m_flow_pipe = m_flow_pipe
# Format heat capacity inputs
# Heat capacity in each fluid circuit
cp_in = np.atleast_1d(cp_f)
if len(cp_in) == 1:
cp_in = np.tile(cp_f, self.nInlets)
elif not len(cp_in) == self.nInlets:
raise ValueError(
'Incorrect length of heat capacity vector.')
self._cp_in = cp_in
# Heat capacity in pipes
cp_pipe = np.tile(cp_in, 2)
self._cp_pipe = cp_pipe
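# --- Illustrative sketch (not part of the original class) --------------------
# _format_inputs above duplicates the per-circuit values onto the two legs of
# each circuit (downward and upward pipe) with np.tile. For a single U-tube:
def _demo_format_inputs(m_flow_borehole=0.25, cp_f=4182.0):
    import numpy as np
    m_flow_in = np.atleast_1d(m_flow_borehole)   # (nInlets,)  -> [0.25]
    cp_in = np.atleast_1d(cp_f)                  # (nInlets,)  -> [4182.]
    m_flow_pipe = np.tile(m_flow_in, 2)          # (2*nPipes,) -> [0.25, 0.25]
    cp_pipe = np.tile(cp_in, 2)                  # (2*nPipes,) -> [4182., 4182.]
    return m_flow_pipe, cp_pipe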
class Coaxial(SingleUTube):
"""
Class for coaxial boreholes.
Contains information regarding the physical dimensions and thermal
characteristics of the pipes and the grout material, as well as methods to
evaluate fluid temperatures and heat extraction rates based on the work of
Hellstrom [#Coaxial-Hellstrom1991]_.
Attributes
----------
pos : tuple
Position (x, y) (in meters) of the pipes inside the borehole.
r_in : (2,) array
Inner radii (in meters) of the coaxial pipes. The first element of the
array corresponds to the inlet pipe.
r_out : (2,) array
Outer radii (in meters) of the coaxial pipes. The first element of the
array corresponds to the inlet pipe.
borehole : Borehole object
Borehole class object of the borehole containing the U-Tube.
k_s : float
Soil thermal conductivity (in W/m-K).
k_g : float
Grout thermal conductivity (in W/m-K).
R_ff : float
Fluid to fluid thermal resistance of the inner pipe to the outer pipe
(in m-K/W).
R_fp : float
Fluid to outer pipe wall thermal resistance of the outer pipe in
contact with the grout (in m-K/W).
J : int, optional
Number of multipoles per pipe to evaluate the thermal resistances.
Default is 2.
nPipes : int
Number of U-Tubes, equal to 1.
nInlets : int
Total number of pipe inlets, equal to 1.
nOutlets : int
Total number of pipe outlets, equal to 1.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the borehole, and both are equal to 1 for a coaxial
borehole. `nSegments` is the number of discretized segments along the
borehole. `nPipes` is the number of pipes (i.e. the number of U-tubes) in
the borehole, equal to 1. `nDepths` is the number of depths at which
temperatures are evaluated.
References
----------
.. [#Coaxial-Hellstrom1991] Hellstrom, G. (1991). Ground heat storage.
Thermal Analyses of Duct Storage Systems I: Theory. PhD Thesis.
University of Lund, Department of Mathematical Physics. Lund, Sweden.
"""
def __init__(self, pos, r_in, r_out, borehole, k_s, k_g, R_ff, R_fp, J=2):
if isinstance(pos, tuple):
pos = [pos]
self.pos = pos
self.r_in = r_in
self.r_out = r_out
self.b = borehole
self.k_s = k_s
self.k_g = k_g
self.R_ff = R_ff
self.R_fp = R_fp
self.J = J
self.nPipes = 1
self.nInlets = 1
self.nOutlets = 1
self._check_geometry()
# Determine the indexes of the inner and outer pipes
iInner = r_out.argmin()
iOuter = r_out.argmax()
# Outer pipe to borehole wall thermal resistance
R_fg = thermal_resistances(pos, r_out[iOuter], borehole.r_b, k_s,
k_g, self.R_fp, J=self.J)[1][0]
# Delta-circuit thermal resistances
self._Rd = np.zeros((2*self.nPipes, 2*self.nPipes))
self._Rd[iInner, iInner] = np.inf
self._Rd[iInner, iOuter] = R_ff
self._Rd[iOuter, iInner] = R_ff
self._Rd[iOuter, iOuter] = R_fg
# Initialize stored_coefficients
self._initialize_stored_coefficients()
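# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of constructing a Coaxial pipe object. The Borehole class
# and its Borehole(H, D, r_b, x, y) signature are assumed here (pygfunction-
# style); only borehole.r_b is actually read by __init__ above. All numeric
# values are made up for illustration.
def _demo_build_coaxial():
    import numpy as np
    borehole = Borehole(H=150.0, D=4.0, r_b=0.075, x=0.0, y=0.0)  # assumed class
    pos = (0.0, 0.0)                      # coaxial pipe centred in the borehole
    r_in = np.array([0.022, 0.040])       # inner radii: [inner pipe, outer pipe] (m)
    r_out = np.array([0.025, 0.045])      # outer radii: [inner pipe, outer pipe] (m)
    return Coaxial(pos, r_in, r_out, borehole,
                   k_s=2.0, k_g=1.0,      # soil / grout conductivities (W/m.K)
                   R_ff=0.20, R_fp=0.05)  # fluid-fluid / fluid-pipe resistances (m.K/W)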
def visualize_pipes(self):
"""
Plot the cross-section view of the borehole.
Returns
-------
fig : figure
Figure object (matplotlib).
"""
# Determine the indexes of the inner and outer pipes
iInner = self.r_out.argmin()
iOuter = self.r_out.argmax()
# Configure figure and axes
fig = _initialize_figure()
ax = fig.add_subplot(111)
ax.set_xlabel(r'$x$ [m]')
ax.set_ylabel(r'$y$ [m]')
ax.axis('equal')
_format_axes(ax)
# Color cycle
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
lw = plt.rcParams['lines.linewidth']
# Borehole wall outline
ax.plot([-self.b.r_b, 0., self.b.r_b, 0.],
[0., self.b.r_b, 0., -self.b.r_b],
'k.', alpha=0.)
borewall = plt.Circle(
(0., 0.), radius=self.b.r_b, fill=False,
color='k', linestyle='--', lw=lw)
ax.add_patch(borewall)
# Pipes
for i in range(self.nPipes):
# Coordinates of pipes
(x_in, y_in) = self.pos[i]
(x_out, y_out) = self.pos[i]
# Pipe outline (inlet)
pipe_in_in = plt.Circle(
(x_in, y_in), radius=self.r_in[0],
fill=False, linestyle='-', color=colors[i], lw=lw)
pipe_in_out = plt.Circle(
(x_in, y_in), radius=self.r_out[0],
fill=False, linestyle='-', color=colors[i], lw=lw)
if iInner == 0:
ax.text(x_in, y_in, i, ha="center", va="center")
else:
ax.text(x_in + 0.5 * (self.r_out[0] + self.r_in[1]), y_in, i,
ha="center", va="center")
# Pipe outline (outlet)
pipe_out_in = plt.Circle(
(x_out, y_out), radius=self.r_in[1],
fill=False, linestyle='-', color=colors[i], lw=lw)
pipe_out_out = plt.Circle(
(x_out, y_out), radius=self.r_out[1],
fill=False, linestyle='-', color=colors[i], lw=lw)
if iInner == 1:
ax.text(x_out, y_out, i + self.nPipes, ha="center", va="center")
else:
ax.text(x_out + 0.5 * (self.r_out[0] + self.r_in[1]), y_out,
i + self.nPipes, ha="center", va="center")
ax.add_patch(pipe_in_in)
ax.add_patch(pipe_in_out)
ax.add_patch(pipe_out_in)
ax.add_patch(pipe_out_out)
plt.tight_layout()
return fig
def _check_geometry(self):
""" Verifies the inputs to the pipe object and raises an error if
the geometry is not valid.
"""
# Determine the indexes of the inner and outer pipes
iInner = self.r_out.argmin()
iOuter = self.r_out.argmax()
# Verify that thermal properties are greater than 0.
if not self.k_s > 0.:
raise ValueError(
'The ground thermal conductivity must be greater than zero. '
'A value of {} was provided.'.format(self.k_s))
if not self.k_g > 0.:
raise ValueError(
'The grout thermal conductivity must be greater than zero. '
'A value of {} was provided.'.format(self.k_g))
if not np.all(self.R_ff >= 0.):
raise ValueError(
'The fluid to fluid thermal resistance must be '
'greater or equal to zero. '
'A value of {} was provided.'.format(self.R_ff))
if not np.all(self.R_fp > 0.):
raise ValueError(
'The fluid to outer pipe wall thermal resistance must be '
'greater than zero. '
'A value of {} was provided.'.format(self.R_fp))
# Verify that the pipe radius is greater than zero.
if not np.all(self.r_in > 0.):
raise ValueError(
'The pipe inner radius must be greater than zero. '
'A value of {} was provided.'.format(self.r_in))
# Verify that the outer pipe radius is greater than the inner pipe
# radius.
if not np.all(np.greater(self.r_out, self.r_in)):
raise ValueError(
'The pipe outer radius must be greater than the pipe inner'
' radius. '
'A value of {} was provided.'.format(self.r_out))
# Verify that the inner radius of the outer pipe is greater than the
# outer radius of the inner pipe.
if not np.greater(self.r_in[iOuter], self.r_out[iInner]):
raise ValueError(
'The inner radius of the
# -*- coding: utf-8 -*-
"""
Created on 2017/6/9
@author: MG
"""
import logging
import threading
import inspect
from ctp import ApiStruct, MdApi, TraderApi
import hashlib, os, sys, tempfile, time, re
from config import Config, PeriodType, PositionDateType
from backend.fh_utils import str_2_bytes, bytes_2_str
from datetime import datetime, timedelta, date
from queue import Queue, Empty
from threading import Thread
from collections import OrderedDict
from backend.orm import CommissionRateModel
from event_agent import event_agent, EventType
from md_saver import MdMin1Combiner, MdMinNCombiner, MdSaver, MdPublisher
TOO_SMALL_TO_AVAILABLE = 0.0000001
TOO_LARGE_TO_AVAILABLE = 100000000
OST_Canceled_STR = bytes_2_str(ApiStruct.OST_Canceled)
FRONT_DISCONNECTED_REASON_DIC = {
0x1001: '网络读失败',
0x1002: '网络写失败',
0x2001: '接收心跳超时',
0x2002: '发送心跳失败',
0x2003: '收到错误报文',
}
class RequestInfo:
"""
Store request-related information in a queue so that other threads can process it.
"""
def __init__(self, func, p_struct, request_id, request_timeout=0, max_wait_rsp_time=2):
"""
:param func:
:param p_struct:
:param request_id:
:param request_timeout:
:param max_wait_rsp_time:
"""
self.func = func
self.p_struct = p_struct
self.request_id = request_id
self.request_timeout = request_timeout
self.create_datetime = datetime.now()
self.handle_datetime = datetime.now()
self.max_wait_rsp_time = max_wait_rsp_time
class ApiBase:
def __init__(self,
broker_id=Config.BROKER_ID,
user_id=Config.USER_ID,
password=Config.PASSWORD):
self.logger = logging.getLogger(self.__class__.__name__)
# TODO: in the future, initialize request_id through self.init_request_id()
self._request_id = 0
self._mutex_request_id = threading.Lock()
# TODO: initialize is_SettlementInfoConfirmed, expose it as a property, and keep the database in sync
self.broker_id = broker_id
self.user_id = user_id
self.password = password
self.trading_day = ''
self.front_id = 1
self.session_id = 0
# logged-in flag
self._has_login = False
# queue of received request responses
self._request_info_resp_queue = Queue()
# dict of requests waiting for a response
self._request_info_wait_resp_dic = {}
# queue of requests waiting to be sent
self._request_info_will_be_send_queue = Queue()
self._handle_request_in_queue_thread = Thread(target=self._handle_request_in_queue, daemon=True)
self._handle_request_in_queue_thread_running = True
@property
def has_login(self):
return self._has_login
@has_login.setter
def has_login(self, val):
self._has_login = val
if val and not self._handle_request_in_queue_thread.is_alive():
self._handle_request_in_queue_thread_running = True
self._handle_request_in_queue_thread.start()
elif not val:
self._handle_request_in_queue_thread_running = False
@staticmethod
def struc_attr_value_transfor(attr_value, validate_data_type=None):
"""
Convert bytes attribute values returned by the OnRsp callbacks to str; values of other types are returned unchanged.
Currently this function is only used by struct_2_json.
:param attr_value:
:param validate_data_type: default None. Validates whether the field is usable. For example, for the date type it checks whether the string is in date format (for performance only the length is checked: 8 characters is treated as a date; StartDelivDate sometimes carries a value like b'1', which would otherwise break instrument updates).
:return:
"""
val_type = type(attr_value)
if val_type == bytes:
ret = bytes_2_str(attr_value)
else:
ret = attr_value
if validate_data_type is not None:
if validate_data_type == date:
# validate/parse the decoded string (ret), not the raw bytes value
if len(ret) != 8:
ret = None
else:
try:
ret = datetime.strptime(ret, Config.DATE_FORMAT_STR_CTP)
except:
ret = None
return ret
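# --- Illustrative sketch (not part of the original class) --------------------
# Shows what struc_attr_value_transfor does for typical CTP fields: bytes are
# decoded to str, and fields declared as dates are parsed, or dropped when the
# raw value is not an 8-character date. The '%Y%m%d' format and the 'gbk'
# decoding are assumptions standing in for Config.DATE_FORMAT_STR_CTP and
# bytes_2_str respectively.
def _demo_attr_transform():
    from datetime import datetime
    raw_ok, raw_bad = b'20180601', b'1'
    as_str = raw_ok.decode('gbk')                                       # bytes -> str
    parsed = datetime.strptime(as_str, '%Y%m%d') if len(as_str) == 8 else None
    invalid = raw_bad.decode('gbk') if len(raw_bad) == 8 else None      # too short -> None
    return as_str, parsed, invalid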
@staticmethod
def struct_2_dic(a_struct, validate_data_type:dict=None):
"""
Convert a struct returned by the OnRsp callbacks into a dict (ready for JSON serialization).
:param a_struct:
:param validate_data_type: default None. Validates whether each field is usable.
:return:
"""
attr_dic = {k: ApiBase.struc_attr_value_transfor(getattr(a_struct, k),
None if validate_data_type is None else validate_data_type.setdefault(k, None))
for k, v in a_struct._fields_}
# json_str = json.dumps(attr_dic)
return attr_dic
@staticmethod
def insert_one_2_db(a_struct, collection_name=None, **kwargs):
"""
Convert a struct returned by the OnRsp callbacks into a dict and insert it into the specified MongoDB collection.
:param a_struct:
:param collection_name:
:return:
"""
if collection_name is None:
collection_name = a_struct.__class__.__name__
dic = ApiBase.struct_2_dic(a_struct)
dic.update(kwargs)
# with_mongo_collection(lambda col: col.insert_one(dic), collection_name)
Config.do_for_mongo_collection(lambda col: col.insert_one(dic), collection_name)
return dic
def get_tmp_path(self):
"""
Get the temporary directory used to store files when the API starts.
:return:
"""
folder_name = b''.join((b'ctp.futures', self.broker_id, self.user_id))
dir_name = hashlib.md5(folder_name).hexdigest()
dir_path = os.path.join(tempfile.gettempdir(), dir_name, self.__class__.__name__) + os.sep
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
return os.fsencode(dir_path) if sys.version_info[0] >= 3 else dir_path
def is_rsp_success(self, rsp_info, stack_num=1):
"""
Check the rsp_info status and log errors under the name of the calling function.
:param rsp_info:
:param stack_num:
:return:
"""
is_success = rsp_info is None or rsp_info.ErrorID == 0
if not is_success:
stack = inspect.stack()
parent_function_name = stack[stack_num].function
self.logger.error('%s 失败:%s', parent_function_name, bytes_2_str(rsp_info.ErrorMsg))
return is_success
def resp_common(self, rsp_info, request_id, is_last, stack_num=1):
"""
Check the RspInfo status together with the bIsLast flag.
:param rsp_info:
:param is_last:
:param stack_num: stack depth of the calling function.
:return: 1 finished successfully; 0 successful but not yet finished; -1 failed.
"""
# self.logger.debug("resp: %s" % str(rsp_info))
is_success = self.is_rsp_success(rsp_info, stack_num=2)
if not is_success:
self._get_response(request_id)
return -1
elif is_last and is_success:
self._get_response(request_id)
return 1
else:
# try:
# stack = inspect.stack()
# parent_function_name = stack[stack_num].function
# self.logger.debug("%s 等待数据接收完全...", parent_function_name)
# except:
# self.logger.warning("get stack error")
return 0
def init_request_id(self):
"""
Add initialization logic for request_id.
:return:
"""
raise NotImplementedError()
def inc_request_id(self):
"""
Get an auto-incremented request_id (thread-safe).
:return:
"""
self._mutex_request_id.acquire()
self._request_id += 1
self._mutex_request_id.release()
return self._request_id
def log_req_if_error(self, req_ret, func_name=None, stack_num=1):
"""
Log an error message if the req call returned a value below zero.
-1: network connection failed;
-2: number of pending requests exceeds the allowed limit;
-3: number of requests per second exceeds the allowed limit.
:param req_ret:
:param stack_num: stack depth of the calling function.
:return:
"""
if req_ret is None:
msg = "req 返回为 None"
if func_name is None:
stack = inspect.stack()
func_name = stack[stack_num].function
self.logger.error('%s 返回错误:%s', func_name, msg)
elif req_ret < 0:
if req_ret == -1:
msg = '网络连接失败'
elif req_ret == -2:
msg = '未处理请求超过许可数'
elif req_ret == -3:
msg = '每秒发送请求数超过许可数'
else:
msg = '其他原因'
if func_name is None:
stack = inspect.stack()
func_name = stack[stack_num].function
self.logger.error('%s 返回错误:%s', func_name, msg)
def _send_request_2_queue(self, func, p_struct, add_request_id=False, add_order_ref=False, request_timeout=2, max_wait_rsp_time=2):
"""
Common request helper: pass in the target API method and the related struct data.
A new request_id is generated automatically and the call is logged.
:param func:
:param p_struct:
:param add_request_id:
:param request_timeout: request timeout in seconds; 0 disables the timeout check.
:param max_wait_rsp_time: maximum seconds to wait for a response; 0 disables the check.
:return:
"""
request_id = self.inc_request_id()
if add_request_id:
p_struct.RequestID = request_id
if add_order_ref:
order_ref = self.inc_order_ref()
p_struct.OrderRef = order_ref
req_info = RequestInfo(func, p_struct, request_id, request_timeout, max_wait_rsp_time)
# self.logger.debug("发送请求到 _request_info_will_be_send_queue")
self._request_info_will_be_send_queue.put(req_info)
# self.logger.debug("发送请求到 _request_info_will_be_send_queue 完成")
return 0
def _handle_request_in_queue(self):
"""
Fetch requests from the background request queue and send them.
:return:
"""
self.logger.info("%s 请求队列 后台发送线程 启动", self.__class__.__name__)
max_wait_time = 30
datetime_last_req = datetime.now()
# Cap the send rate: at most one request per max_seconds_4_freq_request seconds
max_seconds_4_freq_request = 1
while self._handle_request_in_queue_thread_running:
# CTP may refuse to respond when requests are sent too densely, so a wait mechanism is used:
# wait at most max_wait_time seconds for the exchange to answer a request;
# beyond that the request is considered abandoned and later requests are processed.
# self.logger.debug("后台请求队列处理线程 Looping 1")
try:
if len(self._request_info_wait_resp_dic) > 0:
request_id = self._request_info_resp_queue.get(timeout=1)
if request_id in self._request_info_wait_resp_dic:
req_info = self._request_info_wait_resp_dic.pop(request_id)
datetime_last_req = req_info.handle_datetime
self._request_info_resp_queue.task_done()
except Empty:
for request_id in list(self._request_info_wait_resp_dic.keys()):
req_info = self._request_info_wait_resp_dic[request_id]
handle_datetime = req_info.handle_datetime
if (datetime.now() - handle_datetime).seconds > req_info.max_wait_rsp_time:
self.logger.warning('请求超时 %s[%d] %s -> %s', req_info.func.__name__, request_id,
handle_datetime, req_info.p_struct)
del self._request_info_wait_resp_dic[request_id]
else:
self.logger.debug("等待请求响应 %s[%d] %s -> %s", req_info.func.__name__, request_id,
handle_datetime, req_info.p_struct)
pass
continue
except:
self.logger.exception("从 _request_info_will_be_send_queue 获取信息出现异常")
# self.logger.debug("后台请求队列处理线程 Looping 2")
try:
req_info = self._request_info_will_be_send_queue.get(timeout=5)
# self.logger.debug("后台请求队列处理线程 Looping 2.1")
func = req_info.func
func_name = func.__name__
request_id = req_info.request_id
request_timeout = req_info.request_timeout
total_seconds = (datetime.now() - datetime_last_req).total_seconds()
if total_seconds < max_seconds_4_freq_request:
time.sleep(max_seconds_4_freq_request - total_seconds)
p_struct = req_info.p_struct
if (datetime.now() - req_info.create_datetime).total_seconds() > request_timeout > 0:
# The expiry check mainly prevents a burst of requests right after a queue blockage clears
logging.warning("请求过期[%d],创建时间:%s 超时时间:%.1f:%s %s ",
request_id, req_info.create_datetime, request_timeout, func_name, p_struct)
continue
self.logger.debug('发送请求[%d] -> %s %s', request_id, func_name, p_struct)
for n_time in range(1, 4):
req_ret = func(p_struct, request_id)
self.log_req_if_error(req_ret, func_name=func_name)
datetime_now = datetime.now()
datetime_last_req = datetime_now
if req_ret == 0:
req_info.handle_datetime = datetime_now
self._request_info_wait_resp_dic[request_id] = req_info
break
elif req_ret == -3:
# -3: requests per second exceed the allowed limit;
# wait a while, then send again
time.sleep(0.5 * n_time)
continue
else:
break
except Empty:
pass
except:
self.logger.exception("执行请求失败")
finally:
time.sleep(0.1)
# self.logger.debug("后台请求队列处理线程 Looping 3")
self.logger.info("%s 后台请求队列处理线程 结束", self.__class__.__name__)
def _get_response(self, request_id):
"""
Called when a response is received for a previously sent request; used to update the related request queues.
:param request_id:
:return:
"""
self._request_info_resp_queue.put(request_id)
class StrategyOrder:
"""
Records all of a user's data from ReqOrderInsert and OnRspOrderInsert through OnRtnTrade.
"""
def __init__(self, strategy_id, front_id, session_id, order_ref, input_order):
"""
Records the strategy_id and input_order from the ReqOrderInsert request.
:param strategy_id:
:param input_order:
"""
self.__strategy_id = strategy_id
self.front_id = front_id
self.session_id = session_id
self.order_ref = order_ref
self.__input_order = None
self.trade_list = []
self.input_order = input_order
@property
def strategy_id(self):
return self.__strategy_id
@property
def input_order(self):
return self.__input_order
@input_order.setter
def input_order(self, input_order):
self.__input_order = input_order
# with with_mongo_client() as client:
# db = client[Config.MONGO_DB_NAME]
# collection = db[Config.MONGO_COLLECTION_INPUT_ORDER]
# collection.insert_one(input_order)
def __str__(self):
return "%s(front_id=%d, session_id=%d, order_ref=%r)" % (
self.__class__.__name__, self.front_id, self.session_id, self.order_ref)
__repr__ = __str__
class MyMdApi(MdApi, ApiBase):
"""
Local wrapper around MdApi.
"""
def __init__(self,
instrument_id_list,
broker_id=Config.BROKER_ID,
user_id=Config.USER_ID,
password=Config.PASSWORD):
ApiBase.__init__(self, broker_id=broker_id,
user_id=user_id,
password=password)
self.instrument_id_list = instrument_id_list
self.front_id = 0
self.Create()
self.md_saver_dic = {}
self.md_min1_combiner_dic = {}
self.md_minn_combiner_dic = {}
# list of subscribed instruments
self.sub_instrument_list = []
# self.pub_sub = None  # seems to be unused now
# self._md_handler = {}
# def register_md_handler(self, name, md_handler):
# self._md_handler[name] = md_handler
# def _handle_depth_md(self, depth_md_dic):
# error_name_list = []
# for name, handler in self._md_handler.items():
# try:
# handler(depth_md_dic)
# # self.logger.debug('%s data handle finished', name)
# except:
# self.logger.exception('%s run with error will be del on _md_handler', name)
# error_name_list.append(name)
# for name in error_name_list:
# del self._md_handler[name]
# self.logger.warning('从 _md_handler 中移除 %s', name)
# def insert_depth_md_2_queue(self, depth_md_dic):
# """
# 将Tick 数据推送到 md_tick_saver 的队列,供批量保存使用
# 将Tick 数据推送到 md_minute_saver 的队列,供生成分钟K线使用
# :param depth_md_dic:
# :return:
# """
# # 将Tick 数据推送到 md_tick_saver 的队列,供批量保存使用
# self.md_tick_saver.queue_md.put(depth_md_dic)
# # 将Tick 数据推送到 md_minute_saver 的队列,供生成分钟K线使用
# instrument_id = depth_md_dic['InstrumentID']
# md_minute_saver = self.md_minute_saver_dic.setdefault(instrument_id, None)
# if md_minute_saver is not None:
# md_minute_saver.queue_md.put(depth_md_dic)
# else:
# self.logger.warning('%s is not on list for minute saver', instrument_id)
def Create(self): # , pszFlowPath='', bIsUsingUdp=False, bIsMulticast=False
dir_path = self.get_tmp_path()
self.logger.info('cache %s', dir_path)
return super().Create(dir_path)
def RegisterFront(self, front=Config.FRONT_MD_ADDRESS):
self.logger.info('%s', front)
if isinstance(front, bytes):
return super().RegisterFront(front)
for pszFrontAddress in front:
super().RegisterFront(pszFrontAddress)
def OnFrontConnected(self):
self.logger.info('-> ReqUserLogin')
self.ReqUserLogin()
def OnFrontDisconnected(self, nReason):
"""
Called when the communication link between the client and the trading backend is lost. When this happens the API reconnects automatically, so the client does not need to do anything.
:param nReason:
:return:
"""
self.logger.warning('API将自动重新连接,客户端可不做处理 reason=%s %s',
nReason, FRONT_DISCONNECTED_REASON_DIC.setdefault(nReason, '未知原因'))
super().OnFrontDisconnected(nReason)
def OnHeartBeatWarning(self, nTimeLapse):
self.logger.debug('nTimeLapse=%s', nTimeLapse)
def ReqUserLogin(self): # , pReqUserLogin, nRequestID
pReqUserLogin = ApiStruct.ReqUserLogin(BrokerID=self.broker_id,
#!/usr/bin/python
#
# Copyright 2018, <NAME>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from time import time
import os,sys,re,subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import random
import logging
import argparse
import pickle
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist,squareform
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold,GridSearchCV
from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError:
print("INFO: Please install seaborn package for plotting.")
__author__ = 'chris'
def extract_features(mol, sourcename, pos, printHeader=True, fillNa=np.nan, xyz_file=None, plot=False, useSelectionRules=True, OrderAtoms=True, bondAngles=True, skipH=False, addBonds=False, verbose=False):
"""
Create feature matrix from RDKit mol object or xyz file
:param mol: RDKit molecule
:param sourcename: name of sd file
:param pos: position in sdf
:param xyz_file: name of xyz
:param plot: plotting
:param useSelectionRules: use rules to remove strange bonds
:param OrderAtoms: larger atomic number first
:param bondAngles: add bond angles, i.e. distance to third atom
:param skipH: remove H
:param addBonds: add neighbor bonds as features
:param printHeader: prints column headers
:param fillNa: how to fill NA values
:param verbose: verbosity on/off
:return: pandas dataframe with feature matrix
"""
pt = Chem.GetPeriodicTable()
if xyz_file is not None:
if xyz_file.lower().endswith(".xyz"):
atomtypes, coords, q, title = read_xyz(xyz_file, skipH=skipH)
elif xyz_file.lower().endswith(".pdb"):
atomtypes, coords, q, title = read_pdbfile(xyz_file, skipH=skipH)
if q!=0:
logging.info("Found charge: %.2f"%(q))
dm = squareform(pdist(np.asarray(coords)))
else:
if skipH:
try:
mol = Chem.RemoveHs(mol)
except ValueError as e:
logging.info("Skipping H deletion for molecule at pos:" + str(pos))
return(None)
#check if bonds are available
try:
if not addBonds and mol.GetNumBonds(onlyHeavy=False)==0:
logging.info("No bonds found: skipping molecule %s " %Chem.MolToSmiles(mol))
return (None)
except RuntimeError as e:
logging.info("RuntimeError: skipping molecule")
return(None)
dm = Chem.Get3DDistanceMatrix(mol) # both should be the same!!!
q = Chem.GetFormalCharge(mol)
n,m = dm.shape
assert(n == m)
if plot:
plt.pcolormesh(dm)
plt.colorbar()
plt.xlim([0, n])
plt.ylim([0, n])
plt.show()
dist_cut = 3.0 # distance cutoff
n_cut = 3 # neighbour cutoff
if printHeader and verbose:
print('{:<4s}{:<4s}{:>4s}{:>3s}{:>3s}{:>8s}'.format('ID1','ID2','Q', '#1', '#2', 'DIST'),end='')
for i in range(2*n_cut):
if addBonds:
print('{:>4s}{:>3s}{:>8s}{:>8s}{:>4s}'.format('POS', '#', 'DIST', 'DISTB','BNB'),end='')
elif bondAngles:
print('{:>4s}{:>3s}{:>8s}{:>8s}'.format('POS', '#', 'DIST','DISTB'),end='')
else:
print('{:4s}{:3s}{:8s}'.format('POS', '#', 'DIST'),end='')
print("{:4s}".format('TYPE'))
df = []
index = []
for i in range(0,n):
if xyz_file is not None:
bnd_at1 = atomtypes[i]
bond_num1 = pt.GetAtomicNumber(bnd_at1)
else:
bnd_at1 = mol.GetAtomWithIdx(i)
bond_num1 = bnd_at1.GetAtomicNum()
bnd_at1 = bnd_at1.GetSymbol()
for j in range(0,m):
row = []
if i >= j: continue
bnd_dist = dm[i,j]
if bnd_dist>dist_cut: continue
bnd_type = 0
if xyz_file is None:
bnd_at2 = mol.GetAtomWithIdx(j)
bond_num2 = bnd_at2.GetAtomicNum()
bnd = mol.GetBondBetweenAtoms(i, j)
if bnd is not None:
bnd_type = int(bnd.GetBondTypeAsDouble())
if bnd.GetIsAromatic():
bnd_type = 4
else:
bnd_type = 0
bnd_at2=bnd_at2.GetSymbol()
else:
bnd_at2 = atomtypes[j]
bond_num2 = pt.GetAtomicNumber(bnd_at2)
#sanity checks
if xyz_file is None:
# we accept very short bonds but give warning
selstr = "Skipping"
if not useSelectionRules:
selstr = "Keeping"
if bnd_dist<0.75 and bnd_type>0:
logging.warn("Unreasonable short X-X bond (r<0.75): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
elif bnd_dist<1.1 and bond_num1>=6 and bond_num2>=6 and bnd_type>0:
logging.warn("Unreasonable short X-X bond (r<1.1): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
# in case of problems we discard whole molecule
elif bnd_dist < 0.75 and (bond_num1 == 1 or bond_num2 == 1) and bnd_type == 0:
logging.warn("%s unreasonable short X-H distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
if useSelectionRules: return (None)
elif bnd_dist < 1.5 and bond_num1==6 and bond_num2==6 and bnd_type==0:
logging.warn("%s unreasonable short C-C distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
if useSelectionRules: return(None)
elif bnd_dist < 1.0 and bond_num1>=6 and bond_num2>=6 and bnd_type==0:
logging.warn("%s unreasonable short distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
if useSelectionRules: return(None)
# rather generous cutoff
elif bnd_dist>1.8 and bond_num1==6 and bond_num2==6 and bnd_type>0:
logging.warn("%s unreasonable long C-C bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(selstr,bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
if useSelectionRules: return(None)
#unique order
if OrderAtoms and bond_num1<bond_num2:
row.extend([j + 1, i + 1, q,bond_num2, bond_num1, bnd_dist])
i_tmp,j_tmp = j,i
else:
row.extend([i + 1, j + 1, q,bond_num1, bond_num2, bnd_dist])
i_tmp, j_tmp = i, j
if verbose: print('{:<4d}{:<4d}{:4.1f}{:3d}{:3d}{:8.3f}'.format(i_tmp+1,j_tmp+1,q,bond_num1,bond_num2,bnd_dist),end='')
# now iterate over the neighbours of atoms a and b: sort the distance rows of a and b, concatenate them, and skip i and j themselves
for a in [i_tmp,j_tmp]:
row_sorted_a = np.argsort(dm[a,:])
count = 0
k = 0
if len(row_sorted_a) > 2:
for nextn in row_sorted_a:
nextn = int(nextn)
if nextn == j_tmp or nextn == i_tmp:
continue
if k==n_cut:break
dist = dm[a,nextn]
if xyz_file is None:
at = mol.GetAtomWithIdx(nextn)
num = at.GetAtomicNum()
at = at.GetSymbol()
else:
at = atomtypes[nextn]
num = pt.GetAtomicNumber(at)
if bondAngles:
other = i_tmp if a==j_tmp else j_tmp
distb = dm[other,nextn]
if addBonds:
bndb = mol.GetBondBetweenAtoms(a, nextn)
if bndb is not None:
bnd_typeb = int(bndb.GetBondTypeAsDouble())
if bndb.GetIsAromatic():
#bnd_type=randint(1,2)
bnd_typeb = 4
else:
bnd_typeb = 0
row.extend([num, dist, distb,bnd_typeb])
if verbose:
print('{:4d}{:>3d}{:8.3f}{:8.3f}{:4d}'.format(nextn+1,num,dist,distb,bnd_typeb),end='')
else:
row.extend([num, dist,distb])
if verbose:
print('{:4d}{:>3s}{:3d}{:8.3f}{:8.3f}'.format(nextn+1,at,num,dist,distb),end='')
else:
row.extend([num, dist])
if verbose:
print('{:4d}{:>3s}{:3d}{:8.3f}'.format(nextn+1,at,num,dist),end='')
k += 1
count += 1
# padding
while count<n_cut:
count += 1
if verbose:
print('{:>4d}{:>3s}{:3d}{:8.3f}'.format(0,"NA", 0, fillNa),end='')
row.extend([0, fillNa])
if bondAngles:
row.extend([fillNa])
if verbose: print('{:4d}'.format( bnd_type),end='')
row.append(bnd_type)
df.append(row)
index.append(sourcename + '_pos' + str(pos+1) + '_' + str(i_tmp + 1) + 'x' + str(j_tmp + 1))
try:
df = pd.DataFrame(df)
colnames = ['id1','id2','q','ata','atb','distab','ata1','dista1','ata2','dista2','ata3','dista3','atb1','distb1','atb2','distb2','atb3','distb3','bond']
if addBonds:
colnames = ['id1', 'id2', 'q', 'ata', 'atb', 'distab', 'ata1', 'dista1', 'dista1b','bonda1', 'ata2', 'dista2',
'dista2b','bonda2', 'ata3', 'dista3', 'dista3b','bonda3',
'atb1', 'distb1', 'distb1a','bondb1', 'atb2', 'distb2', 'distb2a','bondb2', 'atb3', 'distb3', 'distb3a','bondb3', 'bond']
elif bondAngles:
colnames = ['id1', 'id2', 'q', 'ata', 'atb', 'distab', 'ata1', 'dista1','dista1b', 'ata2', 'dista2','dista2b', 'ata3', 'dista3','dista3b',
'atb1', 'distb1','distb1a', 'atb2', 'distb2','distb2a', 'atb3', 'distb3','distb3a','bond']
if len(colnames)!=len(df.columns):
logging.error("Mismatch in dataframe columns for %s - SMILES: %s"%(sourcename+'_pos'+str(pos+1), Chem.MolToSmiles(mol)))
df.columns = colnames
df.index = index
except ValueError:
#i.e. for empty dataframes
df = None
return df
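# --- Illustrative sketch (not part of the original script) -------------------
# Example of calling extract_features on molecules read from an SD file, the
# same pattern convert_sdf2dataframe below uses. 'molecules.sdf' is a
# placeholder path.
def _demo_extract_features(sdf_path='molecules.sdf'):
    suppl = Chem.SDMolSupplier(sdf_path, removeHs=False, sanitize=False)
    frames = []
    for pos, mol in enumerate(suppl):
        if mol is None:
            continue
        df = extract_features(mol, sdf_path, pos, printHeader=(pos == 0))
        if df is not None:
            frames.append(df)
    return pd.concat(frames) if frames else None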
def convert_sdf2dataframe(infile, outfile="moldat.csv", fillNa=np.nan, sanitize=True, tempsave=False, useSelectionRules=True, skipH=False, addBonds=True, sample=None, debug=False, verbose=False):
"""
Generate training dataset from list of sd files
sd file -> Pandas DataFrame
:param infile: sd file used for training
:param outfile: feature matrix as .csv file
:param fillNa: fill value for NA positions
:param sanitize: switch this off for special molecules RDKit cannot digest, should be True in order to have aromatic bonds
:param tempsave: save temporary data
:param useSelectionRules: apply rules to filter nonsense structures
:param skipH: remove hydrogens
:param addBonds: inject neighbor bonds to feature matrix
:param sample: subsample dataset fraction [0-1]
:param verbose: verbosity on/off
:return: feature matrix as pandas dataframe
"""
logging.info("Generating feature matrix using RDKit from: %s -- with options skipH(%r) addBonds(%r) useSelectionRules(%r)"%(infile,skipH,addBonds,useSelectionRules))
if sample is not None:
logging.info("Subsampling fraction %4.2f of dataset"%(sample))
np.random.seed(42)
df_new = None
suppl = Chem.SDMolSupplier(infile,removeHs=skipH,sanitize=False)
count=0
for i,mol in enumerate(suppl):
"""
Group Configuration Tests.
"""
import json
from operator import itemgetter
from unittest.mock import patch
import ddt
from cms.djangoapps.contentstore.course_group_config import (
CONTENT_GROUP_CONFIGURATION_NAME,
ENROLLMENT_SCHEME,
GroupConfiguration
)
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import reverse_course_url, reverse_usage_url
from openedx.features.content_type_gating.helpers import CONTENT_GATING_PARTITION_ID
from openedx.features.content_type_gating.partitions import CONTENT_TYPE_GATING_SCHEME
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, Group, UserPartition
from xmodule.validation import StudioValidation, StudioValidationMessage
GROUP_CONFIGURATION_JSON = {
'name': 'Test name',
'scheme': 'random',
'description': 'Test description',
'version': UserPartition.VERSION,
'groups': [
{
'name': 'Group A',
'version': 1,
}, {
'name': 'Group B',
'version': 1,
},
],
}
# pylint: disable=no-member
class HelperMethods:
"""
Mixin that provides useful methods for Group Configuration tests.
"""
def _create_content_experiment(self, cid=-1, group_id=None, cid_for_problem=None,
name_suffix='', special_characters=''):
"""
Create content experiment.
Assign Group Configuration to the experiment if cid is provided.
Assigns a problem to the first group in the split test if group_id and cid_for_problem is provided.
"""
sequential = ItemFactory.create(
category='sequential',
parent_location=self.course.location,
display_name=f'Test Subsection {name_suffix}'
)
vertical = ItemFactory.create(
category='vertical',
parent_location=sequential.location,
display_name=f'Test Unit {name_suffix}'
)
c0_url = self.course.id.make_usage_key("vertical", "split_test_cond0")
c1_url = self.course.id.make_usage_key("vertical", "split_test_cond1")
c2_url = self.course.id.make_usage_key("vertical", "split_test_cond2")
split_test = ItemFactory.create(
category='split_test',
parent_location=vertical.location,
user_partition_id=cid,
display_name=f"Test Content Experiment {name_suffix}{special_characters}",
group_id_to_child={"0": c0_url, "1": c1_url, "2": c2_url}
)
ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 0 vertical",
location=c0_url,
)
c1_vertical = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 1 vertical",
location=c1_url,
)
ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 2 vertical",
location=c2_url,
)
problem = None
if group_id and cid_for_problem:
problem = ItemFactory.create(
category='problem',
parent_location=c1_vertical.location,
display_name="Test Problem"
)
self.client.ajax_post(
reverse_usage_url("xblock_handler", problem.location),
data={'metadata': {'group_access': {cid_for_problem: [group_id]}}}
)
c1_vertical.children.append(problem.location)
partitions_json = [p.to_json() for p in self.course.user_partitions]
self.client.ajax_post(
reverse_usage_url("xblock_handler", split_test.location),
data={'metadata': {'user_partitions': partitions_json}}
)
self.save_course()
return vertical, split_test, problem
def _create_problem_with_content_group(self, cid, group_id, name_suffix='', special_characters='', orphan=False):
"""
Create a problem
Assign content group to the problem.
"""
vertical_parent_location = self.course.location
if not orphan:
subsection = ItemFactory.create(
category='sequential',
parent_location=self.course.location,
display_name=f"Test Subsection {name_suffix}"
)
vertical_parent_location = subsection.location
vertical = ItemFactory.create(
category='vertical',
parent_location=vertical_parent_location,
display_name=f"Test Unit {name_suffix}"
)
problem = ItemFactory.create(
category='problem',
parent_location=vertical.location,
display_name=f"Test Problem {name_suffix}{special_characters}"
)
group_access_content = {'group_access': {cid: [group_id]}}
self.client.ajax_post(
reverse_usage_url("xblock_handler", problem.location),
data={'metadata': group_access_content}
)
if not orphan:
self.course.children.append(subsection.location)
self.save_course()
return vertical, problem
def _add_user_partitions(self, count=1, scheme_id="random"):
"""
Create user partitions for the course.
"""
partitions = [
UserPartition(
i, 'Name ' + str(i), 'Description ' + str(i),
[Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')],
scheme=None, scheme_id=scheme_id
) for i in range(count)
]
self.course.user_partitions = partitions
self.save_course()
# pylint: disable=no-member
class GroupConfigurationsBaseTestCase:
"""
Mixin with base test cases for the group configurations.
"""
def _remove_ids(self, content):
"""
Remove ids from the response. We cannot predict IDs, because they're
generated randomly.
We use this method to clean up the response when creating new group configurations.
Returns a tuple that contains removed group configuration ID and group IDs.
"""
configuration_id = content.pop("id")
group_ids = [group.pop("id") for group in content["groups"]]
return (configuration_id, group_ids)
def test_required_fields_are_absent(self):
"""
Test required fields are absent.
"""
bad_jsons = [
# must have name of the configuration
{
'description': 'Test description',
'groups': [
{'name': 'Group A'},
{'name': 'Group B'},
],
},
# must have at least one group
{
'name': 'Test name',
'description': 'Test description',
'groups': [],
},
# an empty json
{},
]
for bad_json in bad_jsons:
response = self.client.post(
self._url(),
data=json.dumps(bad_json),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
def test_invalid_json(self):
"""
Test invalid json handling.
"""
# No property name.
invalid_json = "{'name': 'Test Name', []}"
response = self.client.post(
self._url(),
data=invalid_json,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("error", content)
@ddt.ddt
class GroupConfigurationsListHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods):
"""
Test cases for group_configurations_list_handler.
"""
def _url(self):
"""
Return url for the handler.
"""
return reverse_course_url('group_configurations_list_handler', self.course.id)
def test_view_index_ok(self):
"""
Basic check that the groups configuration page responds correctly.
"""
# This creates a random UserPartition.
self.course.user_partitions = [
UserPartition(0, 'First name', 'First description', [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')]), # lint-amnesty, pylint: disable=line-too-long
]
self.save_course()
if 'split_test' not in self.course.advanced_modules:
self.course.advanced_modules.append('split_test')
self.store.update_item(self.course, self.user.id)
response = self.client.get(self._url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'First name', count=1)
self.assertContains(response, 'Group C')
self.assertContains(response, CONTENT_GROUP_CONFIGURATION_NAME)
def test_unsupported_http_accept_header(self):
"""
Test that a request with a disallowed Accept header is rejected.
"""
response = self.client.get(
self._url(),
HTTP_ACCEPT="text/plain",
)
self.assertEqual(response.status_code, 406)
def test_can_create_group_configuration(self):
"""
Test that you can create a group configuration.
"""
expected = {
'description': 'Test description',
'name': 'Test name',
'scheme': 'random',
'version': UserPartition.VERSION,
'groups': [
{'name': 'Group A', 'version': 1},
{'name': 'Group B', 'version': 1},
],
'parameters': {},
'active': True
}
response = self.client.ajax_post(
self._url(),
data=GROUP_CONFIGURATION_JSON
)
self.assertEqual(response.status_code, 201)
self.assertIn("Location", response)
content = json.loads(response.content.decode('utf-8'))
configuration_id, group_ids = self._remove_ids(content) # pylint: disable=unused-variable
self.assertEqual(content, expected)
# IDs are unique
self.assertEqual(len(group_ids), len(set(group_ids)))
self.assertEqual(len(group_ids), 2)
self.reload_course()
# Verify that user_partitions in the course contains the new group configuration.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, 'Test name')
self.assertEqual(len(user_partitions[0].groups), 2)
self.assertEqual(user_partitions[0].groups[0].name, 'Group A')
self.assertEqual(user_partitions[0].groups[1].name, 'Group B')
self.assertEqual(user_partitions[0].parameters, {})
def test_lazily_creates_cohort_configuration(self):
"""
Test that a cohort schemed user partition is NOT created by
default for the user.
"""
self.assertEqual(len(self.course.user_partitions), 0)
self.client.get(self._url())
self.reload_course()
self.assertEqual(len(self.course.user_partitions), 0)
@ddt.data('content_type_gate', 'enrollment_track')
def test_cannot_create_restricted_group_configuration(self, scheme_id):
"""
Test that you cannot create a restricted group configuration.
"""
group_config = dict(GROUP_CONFIGURATION_JSON)
group_config['scheme'] = scheme_id
group_config.setdefault('parameters', {})['course_id'] = str(self.course.id)
response = self.client.ajax_post(
self._url(),
data=group_config
)
self.assertEqual(response.status_code, 400)
@ddt.ddt
class GroupConfigurationsDetailHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods):
"""
Test cases for group_configurations_detail_handler.
"""
ID = 0
def _url(self, cid=-1):
"""
Return url for the handler.
"""
cid = cid if cid > 0 else self.ID
return reverse_course_url(
'group_configurations_detail_handler',
self.course.id,
kwargs={'group_configuration_id': cid},
)
def test_can_create_new_content_group_if_it_does_not_exist(self):
"""
PUT new content group.
"""
expected = {
'id': 666,
'name': '<NAME>',
'scheme': 'cohort',
'description': 'Test description',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1, 'usage': []},
{'id': 1, 'name': 'Group B', 'version': 1, 'usage': []},
],
'parameters': {},
'active': True,
}
response = self.client.put(
self._url(cid=666),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.reload_course()
# Verify that user_partitions in the course contains the new group configuration.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, '<NAME>')
self.assertEqual(len(user_partitions[0].groups), 2)
self.assertEqual(user_partitions[0].groups[0].name, 'Group A')
self.assertEqual(user_partitions[0].groups[1].name, 'Group B')
self.assertEqual(user_partitions[0].parameters, {})
def test_can_edit_content_group(self):
"""
Edit content group and check its id and modified fields.
"""
self._add_user_partitions(scheme_id='cohort')
self.save_course()
expected = {
'id': self.ID,
'name': 'New Test name',
'scheme': 'cohort',
'description': 'New Test description',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'New Group Name', 'version': 1, 'usage': []},
{'id': 2, 'name': 'Group C', 'version': 1, 'usage': []},
],
'parameters': {},
'active': True,
}
response = self.client.put(
self._url(),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.reload_course()
# Verify that user_partitions is properly updated in the course.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, 'New Test name')
self.assertEqual(len(user_partitions[0].groups), 2)
self.assertEqual(user_partitions[0].groups[0].name, 'New Group Name')
self.assertEqual(user_partitions[0].groups[1].name, 'Group C')
self.assertEqual(user_partitions[0].parameters, {})
def test_can_delete_content_group(self):
"""
Delete content group and check user partitions.
"""
self._add_user_partitions(count=1, scheme_id='cohort')
self.save_course()
details_url_with_group_id = self._url(cid=0) + '/1'
response = self.client.delete(
details_url_with_group_id,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.reload_course()
# Verify that group and partition is properly updated in the course.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(user_partitions[0].name, 'Name 0')
self.assertEqual(len(user_partitions[0].groups), 2)
self.assertEqual(user_partitions[0].groups[1].name, 'Group C')
def test_cannot_delete_used_content_group(self):
"""
Cannot delete content group if it is in use.
"""
self._add_user_partitions(count=1, scheme_id='cohort')
self._create_problem_with_content_group(cid=0, group_id=1)
details_url_with_group_id = self._url(cid=0) + '/1'
response = self.client.delete(
details_url_with_group_id,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode('utf-8'))
self.assertTrue(content['error'])
self.reload_course()
# Verify that user_partitions and groups are still the same.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(len(user_partitions[0].groups), 3)
self.assertEqual(user_partitions[0].groups[1].name, 'Group B')
def test_cannot_delete_non_existent_content_group(self):
"""
Cannot delete content group if it doesn't exist.
"""
self._add_user_partitions(count=1, scheme_id='cohort')
details_url_with_group_id = self._url(cid=0) + '/90'
response = self.client.delete(
details_url_with_group_id,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
# Verify that user_partitions is still the same.
user_partitions = self.course.user_partitions
self.assertEqual(len(user_partitions), 1)
self.assertEqual(len(user_partitions[0].groups), 3)
def test_can_create_new_group_configuration_if_it_does_not_exist(self):
"""
PUT new group configuration when no configurations exist in the course.
"""
expected = {
'id': 999,
'name': '<NAME>',
'scheme': 'random',
'description': 'Test description',
'version': UserPartition.VERSION,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
],
'usage': [],
'parameters': {},
'active': True,
}
response = self.client.put(
self._url(cid=999),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, expected)
self.reload_course()
# Verify that user_partitions in the course contains the new group configuration.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# import tensorflow as tf
# from tensorflow.keras import layers, optimizers
from matplotlib.pyplot import MultipleLocator
import os
from collections import defaultdict
# import __main__
# __main__.pymol_argv = ['pymol', '-qc']
# import pymol as pm
import seaborn as sns
# from scipy import stats
np.set_printoptions(suppress=True) # Suppress scientific notation when printing arrays
np.set_printoptions(threshold=np.inf)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # macOS needs this set to 'True'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# DIY acc
"""def Myaccc(y_true, y_pred):
y_true = tf.cast(y_true, dtype=tf.int32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_pred, axis=1, output_type=tf.int32),
tf.argmax(y_true, axis=1, output_type=tf.int32)), tf.float32)) # Number of rows saved
return accuracy
"""
"""class Myacc(tf.keras.metrics.Metric):
def __init__(self):
super().__init__()
self.total = self.add_weight(name='total', dtype=tf.int32, initializer=tf.zeros_initializer())
self.count = self.add_weight(name='count', dtype=tf.int32, initializer=tf.zeros_initializer())
def update_state(self, y_true, y_pred, sample_weight=None):
values = tf.cast(tf.equal(tf.argmax(y_true, axis=1, output_type=tf.int32),
tf.argmax(y_pred, axis=1, output_type=tf.int32)), tf.int32)
self.total.assign_add(tf.shape(y_true)[0])
self.count.assign_add(tf.reduce_sum(values))
def result(self):
return self.count / self.total
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.total.assign(0)
self.count.assign(0)
class MyCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
if logs.get("val_myacc") > 0.95 and logs.get("loss") < 0.1:
print("\n meet requirements so cancelling training!")
self.model.stop_training = True
"""
"""def plotNNout(self):
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('probability', fontsize=20)
# ax1.set_xlabel('0', fontsize=20)
ax1.set_ylabel('probability', fontsize=20)
ax1.set_ylabel('frame', fontsize=20)
for i in range(1,8):
path = './models/{0}'.format(i)
model = tf.saved_model.load(path)
# data_x = np.load('./iptg_nobind.npy', allow_pickle=True)
data_x = np.load('./iptg_nobind.npy', allow_pickle=True)[500:]
# print(data_x.shape)
data_x = self.norm(data_x)
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
out = model(data_x)
print(out)
ax1.plot(range(4500), out[:,1])
# ax1.plot([0, 1], [0, 1], color='black')
plt.show()
def protran(self):
result=[]
for i in range(1,21):
path = './models/{0}'.format(i)
model = tf.saved_model.load(path)
data_x = np.load('./iptg_nobind.npy', allow_pickle=True)
data_x = self.norm(data_x)
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
out = model(data_x)
mean_model = tf.reduce_mean(out[:,0])
result.append(mean_model)
print(mean_model)
print(result)
print("total_mean:", np.mean(result))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('process', fontsize=20)
ax1.set_xlabel('frame', fontsize=20)
ax1.set_ylabel('probability to Nobind', fontsize=20)
ax1.plot(range(5000),out[:,0])
plt.show()
def train(self,
# i
):
for i in range(7, 8): # Batch training of neural networks
path = self.ANN + "twostates_train.npy" # Read training data
train_x = np.load(path, allow_pickle=True)
test_x = np.load('./iptg_nobind.npy', allow_pickle=True) # Read test data,5000
train_y = np.zeros(shape=(train_x.shape[0])) # Set label,9000
train_y[:4500] = 1
test_y = np.zeros(shape=(test_x.shape[0])) # 5000
# print(train_x.shape, test_x.shape)
dataset_x = np.concatenate((train_x, test_x), axis=0) # Combine training set and test set,14000
# print(dataset_x.shape)
dataset_x = self.norm(dataset_x)
dataset_y = np.concatenate((train_y, test_y)) # Merge tags,14000
# train
dataset_x = tf.convert_to_tensor(dataset_x, dtype=tf.float32)
dataset_y = tf.convert_to_tensor(dataset_y, dtype=tf.int32)
dataset_y_onehot = tf.one_hot(dataset_y, depth=2, dtype=tf.int32)
model = tf.keras.Sequential([
layers.Dense(256, activation=tf.nn.tanh),
layers.Dense(128, activation=tf.nn.tanh),
layers.Dense(64, activation=tf.nn.tanh),
layers.Dense(32, activation=tf.nn.tanh),
layers.Dense(16, activation=tf.nn.tanh),
layers.Dense(8, activation=tf.nn.tanh),
layers.Dense(4, activation=tf.nn.tanh),
layers.Dense(2, activation=tf.nn.softmax)
])
callbacks = MyCallback()
model.compile(optimizer=optimizers.Adam(learning_rate=0.00001),
loss=tf.losses.binary_crossentropy,
metrics=[
Myacc()
])
models_path = './models/' #
logs_dir = './logs/{0}/'.format(i)
logs_train_dir = os.path.join(logs_dir, "train")
logs_valid_dir = os.path.join(logs_dir, "valid")
for dir_name in [logs_dir, logs_train_dir, logs_valid_dir, models_path]:
if not os.path.exists(dir_name):
os.mkdir(dir_name)
summary_writer = tf.summary.create_file_writer(logs_train_dir)
model.fit(
dataset_x,
dataset_y_onehot,
epochs=10000,
shuffle=True,
batch_size=100,
validation_split=5 / 14,
# validation_data=(dataset_x[9000:], dataset_y[9000:]),
callbacks=[callbacks]
)
tf.saved_model.save(model, models_path+'{0}'.format(i))
def testmodels(self):
model1 = tf.saved_model.load("./modelsset2/18")
# model2 = tf.saved_model.load("./models/2")
# data_x = np.load('./iptg_nobind.npy', allow_pickle=True)
data_x = np.load('./Bind.npy', allow_pickle=True)[500:]
data_x = self.norm(data_x)
data_x = tf.convert_to_tensor(data_x, dtype=tf.float32)
# label = np.zeros(shape=(data_x.shape[0]))
# label = tf.convert_to_tensor(label, dtype=tf.int32) #
out1 = model1(data_x)
# print(out)
# out2 = model2(data_x)
pro1 = out1[:,1]
# pro2 = out2[:, 0]
# print(pro1[3754])
# print(pro2[3754])
print(pro1)
print(np.where(pro1==np.min(pro1)))"""
class RAF:
def __init__(self):
self.contact_dis = 4.5 # contact distance between heavy atoms
self.startFrame = 1 # first frame
self.endFrame = 5000 + 1 # last frame
# self.set_name = 7
self.aa = ["GLY", "ALA", "VAL", "LEU", "ILE", "PHE", "TRP", "TYR", "ASP", "ASN",
"GLU", "LYS", "GLN", "MET", "SER", "THR", "CYS", "PRO", "HIS", "ARG",
"HID", "ASN", "ASH", "HIE", "HIP"]
self.data_name = ""
# self.csv_path = ""
# self.frame_path = ""
self.ANN = ""
# self.output = ""
# self.startSet = 1
# self.endSet = 10 + 1
self.Interval = 1 # frame interval
self.frame_name = "md{0}.pdb" # name of every frame
self.csv_name = "{0}.csv" # name of every csv
self.vmd_rmsd_path = "/Users/erik/Desktop/MD_WWN/test_100ns/"
self.RAF_backbone_mass = [14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01, 14.01, 12.01, 12.01,
14.01, 12.01, 12.01, 14.01, 12.01, 12.01]
self.sasa_max = {"GLY": 87.6,
"ALA": 111.9,
"VAL": 154.4,
"LEU": 183.1,
"ILE": 179.1,
"PHE": 206.4,
"TRP": 239.0,
"TYR": 224.6,
"ASP": 169.4,
"ASN": 172.5,
"GLU": 206.0,
"LYS": 212.5,
"GLN": 204.4,
"MET": 204.3,
"SER": 132.9,
"THR": 154.1,
"CYS": 139.3,
"PRO": 148.898, # No hydrogen bonds found, so this figure is calculated by pymol
"HIS": 188.5,
"ARG": 249.0
} # Angstroms^2, using 5-aa stride
# self.hydrophobic_index = [[16, 20, 34, 38, 50, 53], [15, 16, 18, 20]]
# self.hydrophobic_index = [[16, 20, 34, 38, 50, 53], [82, 83, 85, 87]]
self.hydrophobic_index = [16, 20, 34, 38, 50, 53, 82, 83, 85, 87] # sasa_statistics
self.either_index = [37, 41, 45]
self.hydrophilic_index = [36]
self.stride = {"C": "Coil", "T": "Turn", "B": "Bridge", "b": "Bridge", "E": "Strand", "I": "PI_helix",
"G": "310Helix", "H": "AlphaHelix"}
def processIon(self, aa): # Map protonated residue name variants back to standard residue names
if aa in ['ASH']:
return 'ASP'
if aa in ['HIE', 'HID', 'HIP']:
return 'HIS'
return aa
def norm(self, data): # min-max normalization; author note: normalizing the variance might be better
# min-max
min_val = np.min(data)
max_val = np.max(data)
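# Note (added): the nested loops below are equivalent to the vectorized form
#   data = (data - min_val) / (max_val - min_val)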
for i in range(data.shape[0]):
for j in range(data.shape[1]):
data[i][j] = (data[i][j] - min_val) / (max_val - min_val)
return data
def thin_data(self, li, fold=20): # randomly keep roughly 1/fold of the items
y = []
for i in li:
t = np.random.uniform(low=0, high=1)
if t < 1.0 / fold:
y.append(i)
return y
def space_data(self, li, interval=20): # keep every interval-th item
y = []
count = 0
for i in li:
if count % interval == 0:
y.append(i)
count %= interval
count += 1
return y
def readHeavyAtom_singleChain(self, path): # returns (np.ndarray of coordinates, list of atom names)
# To read the coordinates of the heavy atoms of each chain, the chainID information is required
"""[[-21.368 108.599 3.145]
[-19.74 109.906 6.386]
[-19.151 113.618 6.922]
[-16.405 114.786 4.541]
...
[ 8.717 80.336 46.425]
[ 7.828 76.961 48.018]
[ 8.38 74.326 45.331]
[ 12.103 74.061 46.05 ]]"""
print("Reading:", path)
print("Better check out the last column in the input file!")
atom_cor = []
atom_nam = []
with open(path, 'r') as f:
for i in f.readlines():
record = i.strip()
atom = record[:4].strip()
if atom != "ATOM": # Detect ATOM start line
continue
# print(record)
serial = record[6:11].strip() # 697
atname = record[12:16].strip() # CA
resName = self.processIon(record[17:20].strip()) # PRO, Treated protonation conditions
if resName not in self.aa:
continue
resSeq = record[22:26].strip() # 3
cor_x = record[30:38].strip() # Å
cor_y = record[38:46].strip()
cor_z = record[46:54].strip()
element = record[13].strip() # C
xyz = [float(cor_x), float(cor_y), float(cor_z)]
# eg: 2-LYS-N-697
name = resSeq + "-" + resName + "-" + atname + "-" + serial
if element != "H":
atom_cor.append(xyz)
atom_nam.append(name)
return np.array(atom_cor), atom_nam
def euclidean(self, a_matrix, b_matrix):
# Use matrix operations to compute pairwise Euclidean distances between two coordinate sets
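# The method body is truncated in this copy; a minimal sketch of the assumed
# pairwise-distance computation via broadcasting (not the original code):
diff = a_matrix[:, np.newaxis, :] - b_matrix[np.newaxis, :, :]
return np.sqrt(np.sum(diff ** 2, axis=-1))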
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME> <<EMAIL>>
"""
Library to handle SPM data.
This is the core module of all images retrieved by SPM and ToF-SIMS.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import scipy.optimize
import skimage
import skimage.exposure
import skimage.filters
import scipy.interpolate
from skimage import transform as tf
import copy
from .utils import CDF, funit
import sys
import matplotlib as mpl
import warnings
from .utils.misc import PB
try:
from skimage.filters import threshold_local
except:
# For compatibility with old versions of skimage
from skimage.filters import threshold_adaptive as threshold_local
class SPM_image:
"""
Main class to handle SPM images.
This class contains the pixel data of the image as well as its real size.
It also provides a lot of tools to correct the image and perform various analyses and tasks on it.
"""
def __init__(self, BIN, channel='Topography',
corr=None, real=None, zscale='?', _type='Unknown'):
"""
Create a new SPM_image
Parameters
----------
BIN : 2D numpy array
The pixel values of the image as a 2D numpy array
channel : string
The name of the channel. What the image represents.
corr : string or None
'slope' : correct the SPM image for its slope (see pySPM.SPM.SPM_image.correct_slope)
'lines' : correct the SPM image for its lines (see pySPM.SPM.SPM_image.correct_lines)
'plane' : correct the SPM image by plane fitting (see pySPM.SPM.SPM_image.correct_plane)
real : None or dictionary
Information about the real size of the image {'x':width,'y':height,'unit':unit_name}
zscale : string
Unit used to describe the z-scale. (units of the data of BIN)
_type : string
represent the type of measurement
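Example
-------
A minimal construction sketch (the array and sizes below are illustrative,
not taken from the original source):
>>> import numpy as np
>>> img = pySPM.SPM_image(np.zeros((256, 256)), channel='Topography',
...     real={'x': 5, 'y': 5, 'unit': 'um'}, zscale='nm')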
"""
self.channel = channel
self.direction = 'Unknown'
self.size = {'pixels': {'x': BIN.shape[1], 'y': BIN.shape[0]}}
if real is not None:
self.size['real'] = real
else:
self.size['real'] = {'unit': 'pixels',
'x': BIN.shape[1], 'y': BIN.shape[0]}
if 'unit' not in self.size['real']:
self.size['real']['unit'] = 'px'
self.pixels = BIN
self.type = _type
self.zscale = zscale
if corr is not None:
if corr.lower() == 'slope':
self.correct_slope()
elif corr.lower() == 'lines':
self.correct_lines()
elif corr.lower() == 'plane':
self.correct_plane()
def __add__(self, b):
"""
Add up two images. This is a low level function and no check is performed to verify that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels += b.pixels
New.channel += " + "+b.channel
elif type(b) in [int, float]:
New.pixels += b
New.channel += " + {:.2f}".format(b)
return New
def __sub__(self, b):
"""
Subtract two images. This is a low level function and no check is performed to verify that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels -= b.pixels
New.channel += " - "+b.channel
elif type(b) in [int, float]:
New.pixels -= b
New.channel += " - {:.2f}".format(b)
return New
def __mul__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels *= b.pixels
New.channel = "({})*{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels *= b
New.channel = "({})*{:.2f}".format(New.channel,b)
return New
def __div__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels /= b.pixels
New.channel = "({})/{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels /= b
New.channel = "({})/{:.2f}".format(New.channel,b)
return New
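# Note (added, not in the original source): under Python 3 the '/' operator
# dispatches to __truediv__, so the method above is never invoked on its own;
# aliasing it here is an assumed, minimal compatibility fix.
__truediv__ = __div__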
def pxs(self):
"""
Return the pixel size as one (size-per-pixel, unit) tuple per axis, x then y.
"""
fxy = {xy: funit(self.size['real'][xy], self.size['real']['unit']) for xy in 'xy'}
return [(fxy[xy]['value']/self.size['pixels'][xy], fxy[xy]['unit']) for xy in 'xy']
def add_scale(self, length, ax=None, height=20, margin=5, color='w', loc=4, text=True, pixels=None, fontsize=20, edge_color='k', edge_width=3):
"""
Display a scale marker on an existing image
Parameters
----------
length : float
The length of the scale in real units
ax : matplotlib axis
if None the current axis will be taken (plt.gca())
height : int
The height of the scale bar in pixels
color : string
The color used to display the scale bar
loc : int
The location of the scale bar.
1 : top right
2 : top left
3 : bottom left
4 : bottom right
text : bool
display the size of the scale on top of it?
pixels : bool
Is the image plotted in ax with an x/y scale in pixels?
fontsize : float
The fontsize used to display the text
Example
-------
>>> img = pySPM.SPM_image()
>>> img.show()
>>> img.add_scale(50e-6, pixels=False);
Add a scale of 50 μm on an image displayed with real units
>>> img = pySPM.SPM_image()
>>> img.show(pixels=True)
>>> img.add_scale(50e-6);
Add a scale of 50 μm on an image displayed in pixels
"""
import matplotlib.patches
import matplotlib.patheffects as PathEffects
fL = length/self.size['real']['x']
L = self.size['pixels']['x']*fL
fH = height/self.size['pixels']['y']
if ax is None:
ax = plt.gca()
if pixels is None:
if hasattr(ax, 'isPixel'):
pixels = ax.isPixel
else:
pixels = False
flipped = False
if hasattr(ax, 'flipped'):
flipped = ax.flipped
if type(loc) is int:
assert loc in [1, 2, 3, 4]
ref = ax.transAxes.transform({1:(1-fL,0),2:(0,0),3:(0,1-fH),4:(1-fL,1-fH)}[loc])
if loc in [2,3]:
ref[0] += margin
else:
ref[0] -= margin
if loc in [1,2]:
ref[1] += margin
else:
ref[1] -= margin
else:
assert type(loc) in [tuple, list]
assert len(loc)==2
ref = ax.transData.transform(loc) + ax.transAxes.transform((-fL/2,-fH/2)) - ax.transAxes.transform((0,0))
inv = ax.transData.inverted()
ref = inv.transform(ref)
WH = inv.transform(ax.transAxes.transform((fL,fH)))-inv.transform(ax.transAxes.transform((0,0)))
rect = ax.add_patch(matplotlib.patches.Rectangle(ref, width=WH[0], height=WH[1], color=color))
if text:
r = funit(length, self.size['real']['unit'])
if r['unit'][0] == 'u':
r['unit'] = '$\\mu$' + r['unit'][1:]
if loc in [3,4]:
label_ref = [ref[0]+WH[0]/2, ref[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="top", ha="center")
else:
label_ref = [ref[0]+WH[0]/2, ref[1]+WH[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="bottom", ha="center")
ann.set_path_effects([PathEffects.withStroke(linewidth=edge_width, foreground=edge_color)])
def offset(self, profiles, width=1, ax=None, col='w', inline=True, **kargs):
"""
Correct an image by offsetting each row individually so that the lines passed as argument in "profiles" become flat.
Parameters
----------
profiles: list of list
each sublist represent a line as [x1, y1, x2, y2] in pixels known to be flat
width : int, float
the line width in pixels used for better statistics
ax : matplotlib axis or None
If not None, axis in which the profiles will be plotted in
inline : bool
If True perform the correction on the current object, otherwise return a new image
col : string
matplotlib color used to plot the profiles (if ax is not None)
labels : bool
display a label number with each profile
**kargs: arguments passed further to get_row_profile.
axPixels: set to True if your axis "ax" has its data plotted in pixels instead of real distance
Example
-------
Example if the data are plotted in pixels:
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False,axPixels=True)
>>> topo.show(pixels=True, ax=ax[0])
>>> topoC.show(ax=ax[1]);
Example if the data are plotted with real units
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False)
>>> topo.show(ax=ax[0])
>>> topoC.show(ax=ax[1]);
"""
offset = np.zeros(self.pixels.shape[0])
counts = np.zeros(self.pixels.shape[0])
for i, p in enumerate(profiles):
if kargs.get('labels', False):
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, label=str(i), **kargs)
else:
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, **kargs)
counts[y] += 1
offset[y[1:]] += np.diff(D)
counts[counts == 0] = 1
offset = offset/counts
offset = np.cumsum(offset)
offset = offset.reshape((self.pixels.shape[0], 1))
if inline:
self.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return self
else:
C = copy.deepcopy(self)
C.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return C
def pxRect2Real(self, xy, width, height):
"""
Transform xy, width, height data in pixels to the equivalent in real units
"""
ll = self.px2real(xy[0],xy[1])
ur = self.px2real(xy[0]+width,xy[1]+height)
return ll,ur[0]-ll[0],ur[1]-ll[1]
def get_row_profile(self, x1, y1, x2, y2, width=1, col='C1', ax=None, alpha=0, **kargs):
"""
Get a profile per row along a given line. This function is mainly useful for the function offset.
x1, y1, x2, y2: int
coordinates of the line.
width : int
the width of the line used for statistics (in pixels)
col: string
color used to plot the line position
ax : matplotlib axis
axis in which the lines position will plotted
alpha : float
The alpha channel of the line color (≥0 and ≤1)
**kargs:
line style arguments: linewidth, color and linestyle
axis units: axPixels set to True if the axis "ax" is plotted in pixels instead of real units
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('MISSING_INFO', None)] = MissingInfo('User information is missing from the hash')
errors_mapping[('NOT_HASH', None)] = NotHash('The v parameter is not a JSON hash')
query_data = {
'api': self._api,
'url': '/link/sso',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def sid(
self,
email,
uuid,
):
"""Sid.
:param email: Email address to associate with this usage
:param uuid: The uuid of the link usage
"""
request_data = {
'email': email,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('NOT_FOUND', None)] = NotFound('The usage was not found')
query_data = {
'api': self._api,
'url': '/link/sid',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def mail(
self,
email,
uuid,
):
"""Mail.
:param email: Email address
:param uuid: The uuid of the link
"""
request_data = {
'email': email,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('INVALID_EMAIL', None)] = InvalidEmail('Enter a valid email address')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The link was not found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/link/mail',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def charge(
self,
charge_token,
uuid,
):
"""Charge.
:param charge_token: The stripe charge token
:param uuid: The uuid of the link
"""
request_data = {
'charge_token': charge_token,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('CHARGE_FAILED', None)] = ChargeFailed('The charge failed. The error_subtype holds the details on the error')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The link was not found')
query_data = {
'api': self._api,
'url': '/link/charge',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def pin(
self,
uuid,
):
"""Pin.
:param uuid: The uuid of the link
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The link was not found')
query_data = {
'api': self._api,
'url': '/link/pin',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
class AsyncLink:
"""AsyncLink."""
def __init__(self, api):
self._api = api
def list(
self,
account_id=None,
study_id=None,
user_id=None,
):
"""List.
:param account_id: account_id
:param study_id: study_id
:param user_id: user_id
"""
request_data = {
'account_id': account_id,
'study_id': study_id,
'user_id': user_id,
}
errors_mapping = {}
errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
query_data = {
'api': self._api,
'url': '/link/list',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
query_data['paginated_field'] = 'links'
return AsyncQueryOPSF(**query_data)
def add(
self,
action,
prompt_for_anonymize,
acceptance_required=None,
account_id=None,
anonymize=None,
charge_amount=None,
charge_currency=None,
charge_description=None,
email=None,
filter=None,
include_priors=None,
max_hits=None,
meeting_id=None,
message=None,
mfm_page=None,
minutes_alive=None,
mobile_phone=None,
namespace_id=None,
notify=None,
parameters=None,
password=None,
password_is_dob=None,
password_max_attempts=None,
pin_auth=None,
referer=None,
share_code=None,
share_on_view=None,
skip_email_prompt=None,
study_id=None,
ui_json=None,
upload_match=None,
upload_study_customfields=None,
use_share_code=None,
workflow=None,
):
"""Add.
:param action: Link action (STUDY_LIST|STUDY_VIEW|STUDY_UPLOAD)
:param prompt_for_anonymize: Flag to prompt if the anonymization rules should be applied on ingress
:param acceptance_required: Flag that acceptance of TOS is required (optional)
:param account_id: account_id
:param anonymize: Anonymization rules to be applied to any STUDY_UPLOAD done with this link. Rules are formatted as per the rules parameter in /namespace/anonymize (optional)
:param charge_amount: Amount to charge in pennies before the link can be accessed (optional)
:param charge_currency: Charge currency (optional)
:param charge_description: Charge description (optional)
:param email: Email the link to these addresses (optional)
:param filter: filter
:param include_priors: Include prior studies (optional)
:param max_hits: The maximum number of times the link can be used (optional)
:param meeting_id: UUID of the meeting to associate the link with (optional)
:param message: Message to include in the email (optional)
:param mfm_page: Flag to launch the MFM page instead of the viewer (optional)
:param minutes_alive: The maximum number of minutes the link will be alive for (optional)
:param mobile_phone: Send the link to this phone number (optional)
:param namespace_id: namespace_id
:param notify: Comma or space separated list of additional emails to notify of link usage (optional)
:param parameters: JSON array of parameters to add to the redirect URL or return in /namespace/share_code if an upload (optional)
:param password: <PASSWORD> (optional)
:param password_is_dob: Flag that the password is the patient_birth_date for the study (study_id is required) (optional)
:param password_max_attempts: The maximum number of failed password attempts (optional)
:param pin_auth: An account member email and PIN authentication is required (optional)
:param referer: The link can only be accessed from the specified referer. The referer can be a regexp to match multiple referers (optional)
:param share_code: share code for a STUDY_UPLOAD (optional)
:param share_on_view: Flag to share the study with the email after it is viewed (optional)
:param skip_email_prompt: Skip the prompt for email step (optional)
:param study_id: study_id
:param ui_json: JSON for UI settings (optional)
:param upload_match: A JSON hash of DICOM tags and regular expressions they must match uploaded against this link (optional)
:param upload_study_customfields: A JSON hash of customfields that will be mapped to a study on study upload. A key is a customfield UUID, a value is a value for the field (optional)
:param use_share_code: Flag to use the namespace share code settings for a STUDY_UPLOAD (optional)
:param workflow: The workflow this link is intended for (patient_studies) (optional)
"""
request_data = {
'acceptance_required': acceptance_required,
'account_id': account_id,
'action': action,
'anonymize': anonymize,
'charge_amount': charge_amount,
'charge_currency': charge_currency,
'charge_description': charge_description,
'email': email,
'filter': filter,
'include_priors': include_priors,
'max_hits': max_hits,
'meeting_id': meeting_id,
'message': message,
'mfm_page': mfm_page,
'minutes_alive': minutes_alive,
'mobile_phone': mobile_phone,
'namespace_id': namespace_id,
'notify': notify,
'parameters': parameters,
'password': password,
'password_is_dob': password_is_dob,
'password_max_attempts': password_max_attempts,
'pin_auth': pin_auth,
'prompt_for_anonymize': prompt_for_anonymize,
'referer': referer,
'share_code': share_code,
'share_on_view': share_on_view,
'skip_email_prompt': skip_email_prompt,
'study_id': study_id,
'ui_json': ui_json,
'upload_match': upload_match,
'upload_study_customfields': upload_study_customfields,
'use_share_code': use_share_code,
'workflow': workflow,
}
errors_mapping = {}
errors_mapping[('INVALID_ACTION', None)] = InvalidAction('An invalid action was passed')
errors_mapping[('INVALID_CHARGE', None)] = InvalidCharge('The charge is invalid. The error_subtype holds the details on the error')
errors_mapping[('INVALID_EMAIL', None)] = InvalidEmail('An invalid email address was passed')
errors_mapping[('INVALID_FIELD_NAME', None)] = InvalidFieldName('The field name in the rules hash is invalid. The error_subtype holds the invalid field name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
errors_mapping[('INVALID_PHI_FIELD', None)] = InvalidPhiField('The password_is_phi field is invalid or a study_id was not passed')
errors_mapping[('INVALID_PHONE', None)] = InvalidPhone('An invalid cellular phone number was passed')
errors_mapping[('INVALID_REGEXP', None)] = InvalidRegexp('Invalid regular expression. The error_subtype holds the invalid regexp.')
errors_mapping[('INVALID_UPLOAD_MATCH', None)] = InvalidUploadMatch('The upload_match is invalid. The error_subtype holds the details on the error')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The patient or study could not be found. The error_subtype holds the uuid that can not be found')
errors_mapping[('NOT_HASH', None)] = NotHash('The rules field is not a hash')
errors_mapping[('NOT_LIST', None)] = NotList('The field is not a JSON array. The error_subtype holds the name of the field')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
print 'get_files| no image files found', file_pat
raise Exception("get_files: no image files found!")
db2.close()
#print "get_files| DONE with func"
return dict_files
def combine_cats(cats,outfile,search_params): #step2_sextract
'''inputs: cats,outfile,search_params
returns:
calls:
called_by: sextract'''
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
try:
print 'combine_cats| START the func. inputs: cats=',cats , ' outfile=',outfile #, ' search_params=',search_params
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
for catalog in cats:
file = catalog['cat']
aper = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
command_ldactoasc = progs_path['p_ldactoasc'] + ' -i ' + catalog['cat'] + ' -b -s -k MAG_APER MAGERR_APER MAG_APER MAGERR_APER -t OBJECTS > ' + aper
print 'combine_cats| command_ldactoasc=',command_ldactoasc
ooo=os.system(command_ldactoasc)
if ooo!=0: raise Exception("the line os.system(command_ldactoasc) failed\ncommand_ldactoasc="+command_ldactoasc)
cat1 = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
command_asctoldac = progs_path['p_asctoldac']+' -i ' + aper + ' -o ' + cat1 + ' -t OBJECTS -c ' + os.environ['bonn'] + '/photconf/MAG_APER.conf'
print 'combine_cats| command_asctoldac=',command_asctoldac
ooo=os.system(command_asctoldac)
if ooo!=0: raise Exception("the line os.system(command_asctoldac) failed\ncommand_asctoldac="+command_asctoldac)
allconv = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
#adam-watch# hmm, this doesn't have a -t input, does it need one?
command_ldacjoinkey = progs_path['p_ldacjoinkey']+' -i ' + catalog['cat'] + ' -p ' + cat1 + ' -o ' + allconv + ' -k MAG_APER1 MAG_APER2 MAGERR_APER1 MAGERR_APER2'
#adam-watch# print 'combine_cats| adam-look (no -t input) command_ldacjoinkey=',command_ldacjoinkey
ooo=os.system(command_ldacjoinkey)
if ooo!=0: raise Exception("the line os.system(command_ldacjoinkey) failed\ncommand_ldacjoinkey="+command_ldacjoinkey)
tables[catalog['im_type']] = pyfits.open(allconv)
#if filter == filters[0]:
# tables['notag'] = pyfits.open('' + search_params['TEMPDIR'] + 'all.conv' )
for catalog in cats:
for i in xrange(len(tables[catalog['im_type']][1].columns)):
print 'combine_cats| catalog["im_type"]=',catalog["im_type"] , ' catalog["cat"]=',catalog["cat"]
if catalog['im_type'] != '':
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name + catalog['im_type']
else:
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name
cols.append(tables[catalog['im_type']][1].columns[i])
print 'combine_cats| cols=',cols
print 'combine_cats| len(cols)=',len(cols)
hdu = pyfits.PrimaryHDU()
hduIMHEAD = pyfits.BinTableHDU.from_columns(tables[catalog['im_type']][2].columns)
hduOBJECTS = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header["EXTNAME"]='FIELDS'
hdulist[2].header["EXTNAME"]='OBJECTS'
print 'combine_cats| file=',file
res = re.split('/',outfile)
if not os.path.isdir(reduce(lambda x,y: x + '/' + y,res[:-1])):
ooo=os.system('mkdir -p ' + reduce(lambda x,y: x + '/' + y,res[:-1]))
if ooo!=0: raise Exception("the line os.system('mkdir -p ' + reduce(lambda x,y: x + '/' + y,res[:-1])) failed\nreduce(lambda x,y: x + '/' + y,res[:-1])="+reduce(lambda x,y: x + '/' + y,res[:-1]))
hdulist.writeto(outfile,overwrite=True)
print 'combine_cats| outfile=',outfile , '$#######$'
print "combine_cats| DONE with func"
except:
ns.update(locals())
raise
def paste_cats(cats,outfile,index=2): #simple #step2_sextract
'''inputs: cats,outfile,index=1 or 2 (depending on whether you want just an OBJECTS table, or a FIELDS and OBJECTS table)
returns:
purpose: concatenates all of the ldac catalogs in `cats` and saves them to `outfile`
calls:
called_by: sextract'''
print 'paste_cats| START the func. inputs: cats=',cats , ' outfile=',outfile , ' index=',index
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
table = pyfits.open(cats[0])
data = []
nrows = 0
good_cats = []
''' get rid of empty tables '''
for catalog in cats:
cattab = pyfits.open(catalog)
if cattab[index].data is not None:
good_cats.append(catalog)
cats = good_cats
for catalog in cats:
cattab = pyfits.open(catalog)
nrows += cattab[index].data.shape[0]
hduOBJECTS = pyfits.BinTableHDU.from_columns(table[index].columns, nrows=nrows)
rowstart = 0
rowend = 0
for catalog in cats:
cattab = pyfits.open(catalog)
rowend += cattab[index].data.shape[0]
for i in xrange(len(cattab[index].columns)):
hduOBJECTS.data.field(i)[rowstart:rowend]=cattab[index].data.field(i)
rowstart = rowend
# update SeqNr
print 'paste_cats| rowend=',rowend , ' len(hduOBJECTS.data.field("SeqNr"))=',len(hduOBJECTS.data.field("SeqNr")) , ' len(range(1,rowend+1))=',len(range(1 ,rowend+1))
hduOBJECTS.data.field('SeqNr')[0:rowend]=range(1,rowend+1)
hduIMHEAD = pyfits.BinTableHDU.from_columns(table[1].columns)
print 'paste_cats| cols=',cols
print 'paste_cats| len(cols)=',len(cols)
if index == 2:
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header["EXTNAME"]='FIELDS'
hdulist[2].header["EXTNAME"]='OBJECTS'
elif index == 1:
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduOBJECTS)
hdulist[1].header["EXTNAME"]='OBJECTS'
print 'paste_cats| file=',file
hdulist.writeto(outfile,overwrite=True)
print 'paste_cats| outfile=',outfile
print 'paste_cats| done', '$#######$'
print "paste_cats| DONE with func"
#adam-watch# I wonder if find_seeing and calc_seeing would be more accurate if it just used MYSEEING in the file header
def find_seeing(SUPA,FLAT_TYPE): #step2_sextract
'''inputs: SUPA,FLAT_TYPE
returns:
calls: get_files,initialize,calc_seeing,save_exposure
called_by: analyze,fix_radec'''
print 'find_seeing| START the func. inputs: SUPA=',SUPA , ' FLAT_TYPE=',FLAT_TYPE
dict_files = get_files(SUPA,FLAT_TYPE)
print 'find_seeing| dict_files["file"]=',dict_files["file"]
search_params = initialize(dict_files['FILTER'],dict_files['OBJNAME'])
search_params.update(dict_files)
print 'find_seeing| dict_files["files"]=',dict_files["files"]
trial = True #adam-tmp: this implies DONT FORK!
#if __name__ == '__main__':
# trial = False #adam-tmp
''' quick run through for seeing '''
children = []
for image in search_params['files'][:]:
child = False
if not trial:
child = os.fork()
if child:
children.append(child)
if trial or not child:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
params['ROOT_WEIGHT'] = ROOT.replace('I','')
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print 'find_seeing| ROOT=',ROOT
flagim = (params['file'].replace('.fits','.flag.fits')).replace('SCIENCE','WEIGHTS')
#weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['flagim'] = flagim
params['p_sex']=progs_path['p_sex']
command_sex = "nice %(p_sex)s %(file)s -c %(PHOTCONF)s/singleastrom.conf.sex \
-FLAG_IMAGE %(flagim)s\
-FLAG_TYPE MAX \
-CATALOG_NAME %(TEMPDIR)s/seeing_%(ROOT)s.unfilt_tmp.cat \
-FILTER_NAME %(PHOTCONF)s/default.conv\
-CATALOG_TYPE 'ASCII' \
-DETECT_MINAREA 8 -DETECT_THRESH 8.\
-ANALYSIS_THRESH 8 \
-PARAMETERS_NAME %(PHOTCONF)s/singleastrom.ascii.flag.sex" % params
print 'find_seeing| command_sex=',command_sex
ooo=os.system(command_sex)
if ooo!=0: raise Exception("the line os.system(command_sex) failed\ncommand_sex="+command_sex)
### now filter out ones with flags
command_fix = "grep '0$' %(TEMPDIR)s/seeing_%(ROOT)s.unfilt_tmp.cat > %(TEMPDIR)s/seeing_%(ROOT)s.cat " % params
print 'find_seeing| command_fix=',command_fix
ooo=os.system(command_fix)
if ooo!=0: raise Exception("the line os.system(command_fix) failed\ncommand_fix="+command_fix)
if not trial:
os._exit(0)
for child in children:
os.waitpid(child,0)
#adam-old# command_cat = 'cat ' + search_params['TEMPDIR'] + '/seeing_' + SUPA.replace('I','*I') + '*cat > ' + search_params['TEMPDIR'] + '/paste_seeing_' + SUPA.replace('I','*I') + '.cat'
#adam-old# ooo=utilities.run(command_cat)
#adam-old# if ooo!=0: raise Exception("the line utilities.run(command_cat) failed\ncommand_cat="+command_cat)
#adam-old# file_seeing = search_params['TEMPDIR'] + '/paste_seeing_' + SUPA.replace('I','*I') + '.cat'
PIXSCALE = float(search_params['PIXSCALE'])
print 'find_seeing| running calc_seeing(file_seeing,PIXSCALE)'
##get seeing from header keyword here: params['file']
dd=utilities.get_header_kw(params['file'],['MYSEEING'])
fwhm=dd['MYSEEING']
#adam-old# fwhm = calc_seeing(file_seeing,PIXSCALE)
save_exposure({'fwhm':fwhm},SUPA,FLAT_TYPE)
#print 'find_seeing| file_seeing=',file_seeing , ' SUPA=',SUPA , ' PIXSCALE=',PIXSCALE , ' fwhm=',fwhm
#command_grep="grep "+SUPA+" CRNitschke_final_*.txt"
print 'find_seeing| (adam-look) file=',params['file'] , 'SUPA=',SUPA , ' fwhm=',fwhm
print 'find_seeing| DONE with func'
def calc_seeing(infile,PIXSCALE): #step2_sextract
'''inputs: infile,PIXSCALE
returns: fwhm
purpose: histogram the per-object FWHM values (converted to arcsec with PIXSCALE) and return the modal bin as the seeing; returns -999 if fewer than 100 usable objects
calls:
called_by: find_seeing'''
print 'calc_seeing| START the func. inputs: infile=',infile , ' PIXSCALE=',PIXSCALE
#set up bins
binsize = 0.03
nbins = int((3.0-0.3)/binsize+0.5)
bin = scipy.zeros(nbins)
# for each line get fwhm
for line in open(infile,'r').readlines():
tokens = line.split()
fwhm_obj = float(tokens[2])
flag = float(tokens[3])
# make sure flag is zero and the seeing is reasonable
if 3.0 > fwhm_obj*PIXSCALE > 0.3 and flag == 0:
actubin = int((fwhm_obj * PIXSCALE - 0.3)/binsize)
bin[actubin] += 1
# find max
max = 0
k = 0
nobjs = 0
for i in range(nbins):
nobjs += bin[i]
if bin[i]>max:
k=i
max = bin[i]
# set the fwhm
fwhm = 0.3 + k*binsize
# check that its ok
if nobjs < 100:
fwhm = -999
print 'calc_seeing| DONE with func'
return fwhm
def fix_radec(SUPA,FLAT_TYPE): #intermediate #step2_sextract
'''inputs: SUPA,FLAT_TYPE
returns: 1 (if func runs properly), -1 (if no RefCat/2MASS headers found), -2 (if RefCat/2MASS headers have no OBJECTS in them)
purpose: Run sextractor to get stars and their (x,y) positions at the chip-level. Then get the (x,y) positions at the SUPA-level and finally fix the RA/DEC coords of OBJECTS in the RefCat/2MASS headers
calls: get_files,initialize,length_swarp,find_seeing,sextract,get_files,initialize,save_exposure,save_exposure,save_exposure
called_by: get_astrom_run_sextract,match_OBJNAME'''
print 'fix_radec| START the func. inputs: SUPA=',SUPA , ' FLAT_TYPE=',FLAT_TYPE
ppid = str(os.getppid())
#chips = length(SUPA,FLAT_TYPE)
dict_radec = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict_radec['FILTER'],dict_radec['OBJNAME'])
search_params.update(dict_radec)
chips = {}
NUMS = []
at_least_one = False
print 'fix_radec| files:', dict_radec['files']
for image in dict_radec['files']:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print 'fix_radec| NUM=',NUM , 'BASE=',BASE , 'ROOT=',ROOT , 'image=',image
## get the correct gain for the configuration
dd=utilities.get_header_kw(image,['CONFIG'])
config=dd['CONFIG']
params['GAIN'] = config_dict['GAIN'][config]
print "fix_radec| config=",config," params['GAIN']=",params['GAIN']
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
res = re.split('SCIENCE',image)
print 'fix_radec| res=',res
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
print 'fix_radec| res=',res
params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
params['fil_directory'] = res[-1]
print 'fix_radec| params["fil_directory"]=',params["fil_directory"]
res = re.split('_',res[-1])
''' if three second exposure, use the headers in the directory '''
print 'fix_radec| dict_radec["fil_directory"]=',dict_radec["fil_directory"]
if string.find(params['fil_directory'],'CALIB') != -1:
params['directory'] = params['fil_directory']
else:
params['directory'] = res[0]
print 'fix_radec| params["directory"]=',params["directory"]
print 'fix_radec| BASE=',BASE
SDSS_R6 = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)s.head" % params # it's not a ZERO!!!
PANSTARRS = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_PANSTARRS/%(BASE)s.head" % params # it's not a ZERO!!!
SDSS_R9 = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_SDSS-R9/%(BASE)s.head" % params # it's not a ZERO!!!
TWOMASS = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_2MASS/%(BASE)s.head" % params
NOMAD = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_NOMAD*/%(BASE)s.head" % params
SDSS_R6 = SDSS_R6.replace('I_','_').replace('I.','.')
PANSTARRS = PANSTARRS.replace('I_','_').replace('I.','.')
SDSS_R9 = SDSS_R9.replace('I_','_').replace('I.','.')
print 'fix_radec| looking for SCAMP header'
| |
value: `Distance`(metric=max_abs_diff)
:type: `Function`
termination_measure_value
see `termination_measure_value <TransferMechanism.termination_measure_value>`
:default value: 0.0
:type: ``float``
:read only: True
termination_threshold
see `termination_threshold <TransferMechanism.termination_threshold>`
:default value: None
:type:
"""
integrator_mode = Parameter(False, setter=_integrator_mode_setter, valid_types=bool)
integration_rate = FunctionParameter(
0.5,
function_name='integrator_function',
function_parameter_name='rate',
primary=True,
)
initial_value = FunctionParameter(
None,
function_name='integrator_function',
function_parameter_name='initializer'
)
integrator_function = Parameter(AdaptiveIntegrator, stateful=False, loggable=False)
function = Parameter(Linear, stateful=False, loggable=False, dependencies='integrator_function')
integrator_function_value = Parameter([[0]], read_only=True)
on_resume_integrator_mode = Parameter(CURRENT_VALUE, stateful=False, loggable=False)
clip = None
noise = FunctionParameter(0.0, function_name='integrator_function')
termination_measure = Parameter(
Distance(metric=MAX_ABS_DIFF),
modulable=False,
stateful=False,
loggable=False
)
termination_threshold = Parameter(None, modulable=True)
termination_comparison_op = Parameter(LESS_THAN_OR_EQUAL, modulable=False, loggable=False)
termination_measure_value = Parameter(0.0, modulable=False, read_only=True)
output_ports = Parameter(
[RESULTS],
stateful=False,
loggable=False,
read_only=True,
structural=True,
)
def _validate_variable(self, variable):
if 'U' in str(variable.dtype):
return 'may not contain non-numeric entries'
def _validate_clip(self, clip):
if clip:
if (not (isinstance(clip, (list,tuple)) and len(clip)==2
and all(isinstance(i, numbers.Number) for i in clip))):
return 'must be a tuple with two numbers.'
if not clip[0] < clip[1]:
return 'first item must be less than the second.'
def _parse_clip(self, clip):
if clip:
return tuple(clip)
def _validate_integrator_mode(self, integrator_mode):
if not isinstance(integrator_mode, bool):
return 'may only be True or False.'
def _validate_integration_rate(self, integration_rate):
integration_rate = convert_to_np_array(integration_rate)
if not all_within_range(integration_rate, 0, 1):
return 'must be an int or float in the interval [0,1]'
def _validate_termination_measure(self, termination_measure):
if not isinstance(termination_measure, TimeScale) and not is_function_type(termination_measure):
return f"must be a function or a TimeScale."
def _parse_termination_measure(self, termination_measure):
if isinstance(termination_measure, type):
return termination_measure()
return termination_measure
def _validate_termination_threshold(self, termination_threshold):
if (termination_threshold is not None
and not isinstance(termination_threshold, (int, float))):
return 'must be a float or int.'
def _validate_termination_comparison_op(self, termination_comparison_op):
if (termination_comparison_op not in comparison_operators.keys()
and termination_comparison_op not in comparison_operators.values()):
return f"must be boolean comparison operator or one of the following strings:" \
f" {','.join(comparison_operators.keys())}."
@tc.typecheck
def __init__(self,
default_variable=None,
size=None,
input_ports:tc.optional(tc.any(Iterable, Mechanism, OutputPort, InputPort))=None,
function=None,
noise=None,
clip=None,
integrator_mode=None,
integrator_function=None,
initial_value=None,
integration_rate=None,
on_resume_integrator_mode=None,
termination_measure=None,
termination_threshold:tc.optional(tc.any(int, float))=None,
termination_comparison_op: tc.optional(tc.any(str, is_comparison_operator)) = None,
output_ports:tc.optional(tc.any(str, Iterable))=None,
params=None,
name=None,
prefs: tc.optional(is_pref_set) = None,
**kwargs):
"""Assign type-level preferences and call super.__init__
"""
# Default output_ports is specified in constructor as a string rather than a list
# to avoid "gotcha" associated with mutable default arguments
# (see: bit.ly/2uID3s3 and http://docs.python-guide.org/en/latest/writing/gotchas/)
if output_ports is None or output_ports == RESULTS:
output_ports = [RESULTS]
initial_value = self._parse_arg_initial_value(initial_value)
self._current_variable_index = 0
super(TransferMechanism, self).__init__(
default_variable=default_variable,
size=size,
input_ports=input_ports,
output_ports=output_ports,
initial_value=initial_value,
noise=noise,
integration_rate=integration_rate,
integrator_mode=integrator_mode,
clip=clip,
termination_measure=termination_measure,
termination_threshold=termination_threshold,
termination_comparison_op=termination_comparison_op,
integrator_function=integrator_function,
on_resume_integrator_mode=on_resume_integrator_mode,
function=function,
params=params,
name=name,
prefs=prefs,
**kwargs
)
def _parse_arg_initial_value(self, initial_value):
return self._parse_arg_variable(initial_value)
def _parse_termination_measure_variable(self, variable):
# compares to previous value
# NOTE: this method is for shaping, not for computation, and
# a previous value should not be passed through here
return np.array([variable, variable])
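# Illustrative sketch (not part of the source): with the default termination_measure, the
# [current, previous] pair shaped above is reduced to a maximum absolute difference and
# compared against termination_threshold using termination_comparison_op (default <=).
# >>> import numpy as np
# >>> current, previous = np.array([0.52, 0.49]), np.array([0.50, 0.50])
# >>> float(np.max(np.abs(current - previous))) <= 0.05   # roughly what Distance(metric=MAX_ABS_DIFF) yields
# True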
def _validate_params(self, request_set, target_set=None, context=None):
"""Validate FUNCTION and Mechanism params
"""
super()._validate_params(request_set=request_set, target_set=target_set, context=context)
# Validate FUNCTION
if self.parameters.function._user_specified:
transfer_function = self.defaults.function
transfer_function_class = None
# FUNCTION is a Function
if isinstance(transfer_function, Function):
transfer_function_class = transfer_function.__class__
# FUNCTION is a class
elif inspect.isclass(transfer_function):
transfer_function_class = transfer_function
if issubclass(transfer_function_class, Function):
if not issubclass(transfer_function_class, (TransferFunction, SelectionFunction, UserDefinedFunction)):
raise TransferError(f"Function specified as {repr(FUNCTION)} param of {self.name} "
f"({transfer_function_class.__name__}) must be a "
f"{' or '.join([TRANSFER_FUNCTION_TYPE, SELECTION_FUNCTION_TYPE])}.")
elif not isinstance(transfer_function, (types.FunctionType, types.MethodType)):
raise TransferError(f"Unrecognized specification for {repr(FUNCTION)} param "
f"of {self.name} ({transfer_function}).")
# FUNCTION is a function or method, so test that shape of output = shape of input
if isinstance(transfer_function, (types.FunctionType, types.MethodType, UserDefinedFunction)):
var_shape = self.defaults.variable.shape
if isinstance(transfer_function, UserDefinedFunction):
val_shape = transfer_function._execute(self.defaults.variable, context=context).shape
else:
val_shape = np.array(transfer_function(self.defaults.variable, context=context)).shape
if val_shape != var_shape:
raise TransferError(f"The shape ({val_shape}) of the value returned by the Python function, "
f"method, or UDF specified as the {repr(FUNCTION)} param of {self.name} "
f"must be the same shape ({var_shape}) as its {repr(VARIABLE)}.")
# IMPLEMENTATION NOTE:
# Need to validate initial_value and integration_rate here (vs. in Parameters._validate_XXX)
# as they must be compared against default_variable if it was user-specified
# which is not available in Parameters _validation.
# Validate INITIAL_VALUE
if INITIAL_VALUE in target_set and target_set[INITIAL_VALUE] is not None:
initial_value = np.array(target_set[INITIAL_VALUE])
if (
not iscompatible(initial_value, self.defaults.variable)
# extra conditions temporary until universal initializer
# validation is developed
and initial_value.shape != self.integrator_function.defaults.variable.shape
and self._get_parsed_variable(self.parameters.integrator_function,
initial_value).shape != self.integrator_function.defaults.variable.shape
):
raise TransferError(f"The format of the initial_value parameter for {append_type_to_name(self)} "
f"({initial_value}) must match its variable ({self.defaults.variable}).")
# Validate INTEGRATION_RATE:
if INTEGRATION_RATE in target_set and target_set[INTEGRATION_RATE] is not None:
integration_rate = np.array(target_set[INTEGRATION_RATE])
if (not np.isscalar(integration_rate.tolist())
and integration_rate.shape != self.defaults.variable.squeeze().shape):
raise TransferError(f"{repr(INTEGRATION_RATE)} arg for {self.name} ({integration_rate}) "
f"must be either an int or float, or have the same shape "
f"as its {VARIABLE} ({self.defaults.variable}).")
# Validate NOISE:
if NOISE in target_set:
self._validate_noise(target_set[NOISE])
# Validate INTEGRATOR_FUNCTION:
if INTEGRATOR_FUNCTION in target_set and target_set[INTEGRATOR_FUNCTION] is not None:
integtr_fct = target_set[INTEGRATOR_FUNCTION]
if not (isinstance(integtr_fct, IntegratorFunction)
or (isinstance(integtr_fct, type) and issubclass(integtr_fct, IntegratorFunction))):
raise TransferError(f"The function specified for the {repr(INTEGRATOR_FUNCTION)} arg of {self.name} "
f"({integtr_fct}) must be an {IntegratorFunction.__class__.__name__}.")
# FIX: CONSOLIDATE THIS WITH StatefulFunction._validate_noise
def _validate_noise(self, noise):
# Noise is a scalar, list, array or DistributionFunction
if isinstance(noise, DistributionFunction):
noise = noise.execute
if isinstance(noise, (np.ndarray, list)):
if len(noise) == 1:
pass
# Variable is a list/array
elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable)
and not iscompatible(np.atleast_1d(noise), self.defaults.variable) and len(noise) > 1):
raise MechanismError(f"Noise parameter ({noise}) for '{self.name}' does not match default variable "
f"({self.defaults.variable}); it must be specified as a float, a function, "
f"or an array of the appropriate shape "
f"({np.shape(np.array(self.defaults.variable))}).")
else:
for i in range(len(noise)):
if isinstance(noise[i], DistributionFunction):
noise[i] = noise[i].execute
if (not np.isscalar(noise[i]) and not callable(noise[i])
and not iscompatible(np.atleast_2d(noise[i]), self.defaults.variable[i])
and not iscompatible(np.atleast_1d(noise[i]), self.defaults.variable[i])):
raise MechanismError(f"The element '{noise[i]}' specified in 'noise' for {self.name} "
f"is not valid; noise must be list or array must be floats or functions.")
elif _is_control_spec(noise):
pass
# Otherwise, must be a float, int or function
elif noise is not None and not isinstance(noise, (float, int)) and not callable(noise):
raise MechanismError(f"Noise parameter ({noise}) for {self.name} must be a float, "
f"function, or array/list of these.")
def _instantiate_parameter_ports(self, function=None, context=None):
# If function is a logistic, and clip has not been specified, bound it between 0 and 1
if (
(
isinstance(function, Logistic)
or (
inspect.isclass(function)
and issubclass(function, Logistic)
)
)
and self.clip is None
):
self.clip = (0,1)
super()._instantiate_parameter_ports(function=function, context=context)
def _instantiate_attributes_before_function(self, function=None, context=None):
super()._instantiate_attributes_before_function(function=function, context=context)
if self.parameters.initial_value._get(context) is None:
self.defaults.initial_value = copy.deepcopy(self.defaults.variable)
self.parameters.initial_value._set(copy.deepcopy(self.defaults.variable), context)
def _instantiate_output_ports(self, context=None):
# If user specified more than one item for variable, but did not specify any custom OutputPorts,
# then assign one OutputPort (with the default name, indexed by the number of the item) per item of variable
if len(self.output_ports) == 1 and self.output_ports[0] == RESULTS:
if len(self.defaults.variable) == 1:
output_ports = [RESULT]
else:
output_ports = []
for i, item in enumerate(self.defaults.variable):
output_ports.append({NAME: f'{RESULT}-{i}', VARIABLE: (OWNER_VALUE, i)})
self.parameters.output_ports._set(output_ports, context)
super()._instantiate_output_ports(context=context)
# # Relabel first output_port:
# # default (assigned by Mechanism's OutputPort registry) is to name it "RESULT";
# # but in this context, explicitly adding -0 index helps put first one on par with others
# # (i.e., make clear the alignment of each OutputPort with the items of the TransferMechanmism's value).
# remove_instance_from_registry(registry=self._portRegistry,
# category=OUTPUT_PORT,
# component=self.output_ports['RESULT'])
# register_instance(self.output_ports['RESULT'], 'RESULT-0', OutputPort, self._portRegistry, OUTPUT_PORT)
def _get_instantaneous_function_input(self, function_variable, noise, context=None):
noise = self._try_execute_param(noise, function_variable, context=context)
if noise is not None and not safe_equals(noise, 0):
current_input = function_variable + noise
else:
current_input = function_variable
return current_input
def _clip_result(self, clip, current_input):
if clip is not None:
minCapIndices = np.where(current_input < clip[0])
maxCapIndices = np.where(current_input > clip[1])
current_input[minCapIndices] = np.min(clip)
current_input[maxCapIndices] = np.max(clip)
return current_input
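# Illustrative sketch of _clip_result (not part of the source): values outside the clip
# bounds are pinned to the nearer bound.
# >>> import numpy as np
# >>> x = np.array([-0.5, 0.3, 1.7])
# >>> x[np.where(x < 0)] = 0.0; x[np.where(x > 1)] = 1.0
# >>> x
# array([0. , 0.3, 1. ])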
def _gen_llvm_is_finished_cond(self, ctx, builder, params, state):
current = pnlvm.helpers.get_state_ptr(builder, self, state, "value")
threshold_ptr = pnlvm.helpers.get_param_ptr(builder, self, params,
"termination_threshold")
if isinstance(threshold_ptr.type.pointee, pnlvm.ir.LiteralStructType):
# Threshold is not defined, return the old value of finished flag
assert len(threshold_ptr.type.pointee) == 0
is_finished_ptr = pnlvm.helpers.get_state_ptr(builder, self, state,
"is_finished_flag")
is_finished_flag = builder.load(is_finished_ptr)
return builder.fcmp_ordered("!=", is_finished_flag,
is_finished_flag.type(0))
# If modulated, termination threshold is single element array
if isinstance(threshold_ptr.type.pointee, pnlvm.ir.ArrayType):
assert len(threshold_ptr.type.pointee) == 1
threshold_ptr = builder.gep(threshold_ptr, [ctx.int32_ty(0),
ctx.int32_ty(0)])
threshold = builder.load(threshold_ptr)
cmp_val_ptr = builder.alloca(threshold.type)
if self.termination_measure is max:
assert self._termination_measure_num_items_expected == 1
# -*- coding: utf-8 -*-
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import numpy as np
from ...commons import builtins
from ...commons.basic_graph_ops import disconnect_edge, connect_edge, \
delete_node, replace_node, connect_dests, topsort
from ...nnssa import ParsedNode
ELEMENTWISE_OPS = [
'Maximum',
'Minimum',
'Add',
'Sub',
'BiasAdd',
'Mul',
'Sigmoid',
'Relu',
'LeakyRelu',
'Tanh',
'Identity',
'Sqrt',
'Rsqrt',
'Pow',
]
def _check_number_inputs(node, n):
return len(node.inputs) == n
def _check_number_outputs(node, n):
return len(node.outputs) == n
def _check_single_out_vector_constant_node(node):
return(node.op == 'Const' and len(node.outputs) == 1 and \
node.value is not None and len(np.squeeze(node.value.val).shape) == 1)
def _is_NHWC(graph, node):
if (node.op == 'ResizeBilinear' or node.op == 'ResizeNearestNeighbor'):
return True
if (node.op == 'Conv2D' or node.op == 'Pooling' or node.op =='MaxPool' or \
node.op == 'AvgPool') and node.attr.get('data_format') == 'NHWC':
return True
if node.op == 'ConcatV2':
# ConcatV2's last input is axis
return all(graph[inp].attr.get('data_format') == 'NHWC_format_inserted' for inp in
node.inputs[:-1])
if node.op in ELEMENTWISE_OPS:
# If it is an elementwise op and either all of its parents are
# "NHWC_format_inserted", or at least one parent is "NHWC_format_inserted"
# and the remaining parents are vector (rank-1) constants, then this node
# is also declared "NHWC_format_inserted".
NHWC_parent = any(
[graph[inp].attr.get('data_format', None) == 'NHWC_format_inserted' for inp in node.inputs])
if NHWC_parent:
for inp in node.inputs:
parent_node = graph[inp]
if parent_node.attr.get('data_format', None) == 'NHWC_format_inserted':
continue
elif parent_node.value is not None:
# check that the input is a constant and a vector (rank 1)
val = np.array(parent_node.value.val)
if len(val.shape) == 1 and builtins.is_tensor(parent_node.datatype) and len(parent_node.outputs) == 1:
continue
else:
return False
else:
return False
return True
return False
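# Illustrative sketch of the elementwise rule above (hypothetical nodes, not from the source):
# an 'Add' fed by a Conv2D already marked 'NHWC_format_inserted' and a rank-1 Const
# (e.g. a per-channel bias of shape (64,)) is itself treated as NHWC, whereas an 'Add'
# whose inputs carry no NHWC marking is left alone.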
def _insert_transpose_to_or_from_nchw(graph, src, dst, transpose_node_name, transpose_params=[0,3,1,2]):
'''
Insert a node called "transpose_node_name" between src and dst
This node should be a transpose node with params "transpose_params"
'''
# First check whether the node already exists in the graph or not.
if transpose_node_name in graph:
tp_node = graph[transpose_node_name]
if dst.name not in tp_node.outputs:
tp_node.outputs.append(dst.name)
else:
# the node does not exist, so create a fresh one
tp_node = ParsedNode()
tp_node.op = 'Transpose'
tp_node.name = transpose_node_name
# Adjust type inference
if builtins.is_tensor(src.datatype):
s = src.datatype.get_shape()
if len(s) == 4:
tp_shape = tuple([s[transpose_params[0]], s[transpose_params[1]], s[transpose_params[2]], s[transpose_params[3]]])
tp_node.datatype = builtins.tensor(src.datatype.get_primitive(), tp_shape)
tp_node.inputs = [src.name]
tp_node.outputs = [dst.name]
tp_node.attr['dim'] = transpose_params
if '_output_shapes' in src.attr:
input_shape = src.attr['_output_shapes'][0]
tp_node.attr['_output_shapes'] = [[input_shape[transpose_params[0]],input_shape[transpose_params[1]],input_shape[transpose_params[2]],input_shape[transpose_params[3]]]]
graph[transpose_node_name] = tp_node
# Rename dst's input 'src' to 'transpose_node_name'
for idx, inp in enumerate(dst.inputs):
if inp == src.name:
dst.inputs[idx] = transpose_node_name
break
# Rename src's output from 'dst' to 'transpose_node_name'
if transpose_node_name in src.outputs:
# 'transpose_node_name' already exists as an output of the src,
# we just need to delete dst node from the output list of src, instead of replacing it
if dst.name in src.outputs:
src.outputs.remove(dst.name)
else:
for idx, outp in enumerate(src.outputs):
if outp == dst.name:
src.outputs[idx] = transpose_node_name
break
def _insert_transpose_to_nchw(graph, src, dst):
tp_node_name = src.name + "_to_nchw"
_insert_transpose_to_or_from_nchw(graph, src, dst, tp_node_name, [0,3,1,2])
def _insert_transpose_from_nchw(graph, src, dst):
tp_node_name = src.name + "_to_nhwc"
_insert_transpose_to_or_from_nchw(graph, src, dst, tp_node_name, [0,2,3,1])
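# Illustrative sketch of what _insert_transpose_to_nchw does to an edge src -> dst
# (hypothetical node names, not from the source):
#   before: src.outputs = [..., 'dst'],          dst.inputs = [..., 'src']
#   after:  src.outputs = [..., 'src_to_nchw'],  dst.inputs = [..., 'src_to_nchw']
#           graph['src_to_nchw'] is a 'Transpose' node with attr['dim'] = [0, 3, 1, 2];
#           the reverse helper uses the suffix '_to_nhwc' and params [0, 2, 3, 1].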
def transform_nhwc_to_nchw(nnssa):
"""
Mark each node with "NHWC", so that the conversion process
can avoid inserting unnecessary transpositions.
A node's format is "NHWC" if and only if:
(1) it is a conv or pooling or image_resize layer with "NHWC" data format
(2) it is a rank-preserving operation whose inputs are all "NHWC"
"""
for fn_key in list(nnssa.functions.keys()):
graph = nnssa.functions[fn_key].graph
# this pass needs the ssa to be in the topologically sorted order
node_names = topsort(graph)
# Mark all NHWC nodes
nhwc_nodes = []
for name in node_names:
node = graph[name]
if len(node.outputs) > 0 and len(node.inputs) > 0 and _is_NHWC(graph, node):
node.attr['data_format'] = 'NHWC_format_inserted'
nhwc_nodes.append(name)
for name in nhwc_nodes:
node = graph[name]
# Adjust type inference
if builtins.is_tensor(node.datatype):
s = node.datatype.get_shape()
if len(s) == 4:
new_shape = tuple([s[0], s[3], s[1], s[2]])
node.datatype = builtins.tensor(node.datatype.get_primitive(), new_shape)
node.attr['symbolic_datatype'] = node.datatype
if '_output_shapes' in node.attr:
orig_out_shapes = node.attr['_output_shapes']
if len(orig_out_shapes) == 1 and len(orig_out_shapes[0]) == 4:
s = orig_out_shapes[0]
node.attr['_output_shapes'] = [[s[0], s[3], s[1], s[2]]]
if node.op in ELEMENTWISE_OPS:
for inp in node.inputs:
parent_node = graph[inp]
if parent_node.value is not None:
val = np.array(parent_node.value.val)
if len(val.shape) == 1 and builtins.is_tensor(parent_node.datatype):
parent_node.datatype = builtins.tensor(parent_node.datatype.get_primitive(),
(1, val.shape[0], 1, 1))
parent_node.value.val = np.reshape(parent_node.value.val, (1, val.shape[0], 1, 1))
# Insert NHWC->NCHW transpose
for i, inp_node_name in enumerate(node.inputs):
inp_node_format = graph[inp_node_name].attr.get('data_format')
if graph[inp_node_name].op == 'Const':
# Const weights and parameters
continue
if inp_node_format != 'NHWC_format_inserted':
_insert_transpose_to_nchw(graph, graph[inp_node_name], node)
# Insert NCHW->NHWC transpose
for i, out_node_name in enumerate(node.outputs):
out_node_format = graph[out_node_name].attr.get('data_format')
if out_node_format != 'NHWC_format_inserted':
_insert_transpose_from_nchw(graph, node, graph[out_node_name])
# Adjust output shape and concat layer's axis parameter
if node.op == 'ConcatV2' and len(node.inputs) > 1 and graph[node.inputs[-1]].value is not None:
axis = graph[node.inputs[-1]].value.val
axis = 4 + axis if axis < 0 else axis
if axis == 3:
node.attr['axis'] = 1
elif axis == 2 or axis == 1:
node.attr['axis'] = axis + 1
else:
node.attr['axis'] = axis
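# Illustrative sketch of the ConcatV2 axis remapping above (not part of the source):
#   NHWC axis 3 (channels)  -> NCHW axis 1
#   NHWC axis 1 or 2 (H, W) -> NCHW axis 2 or 3 (axis + 1)
#   axis 0 (batch)          -> unchanged
# e.g. a concat written with axis=-1 is first normalised to 3 and then stored as axis 1.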
def fuse_bias_add(nnssa):
# look for 'BiasAdd' nodes following 'MatMul' or 'Conv2D'. If the other input in
# 'BiasAdd' is coming from a const node, then copy the value of that const
# in the parent and remove the 'BiasAdd', i.e. connect its children
# to its parent.
for fn_key in list(nnssa.functions.keys()):
f = nnssa.functions[fn_key]
keys = list(f.graph.keys())
nodes_fused = []
for k in keys:
if k not in f.graph:
continue
current_node = f.graph[k]
if current_node.op == 'BiasAdd' and len(current_node.inputs) == 2:
parent_node = f.graph[current_node.inputs[0]]
second_p_node = f.graph[current_node.inputs[1]]
if ((parent_node.op == 'MatMul' or parent_node.op == 'Conv2D') and len(parent_node.outputs) == 1) and \
(second_p_node.value is not None and len(second_p_node.outputs) == 1 and second_p_node.outputs[0] == k):
parent_node.attr['bias'] = second_p_node.value.val
disconnect_edge(f.graph, second_p_node.name, k) # disconnect the const
disconnect_edge(f.graph, parent_node.name, k) # disconnect the first parent
for out_node in current_node.outputs:
f.graph[parent_node.name].outputs.append(out_node)
if current_node.name in f.graph[out_node].inputs:
idx = f.graph[out_node].inputs.index(current_node.name)
f.graph[out_node].inputs[idx] = parent_node.name
else:
raise ValueError('[Op Fusion] fuse_bias_add() cannot identify biasAdd output.')
nodes_fused.append(k)
nodes_fused.append(second_p_node.name)
for nf in nodes_fused:
delete_node(f.graph, nf)
if len(nodes_fused) > 0:
print("[Op Fusion] fuse_bias_add() deleted {} nodes.".format(len(nodes_fused)))
def onehot_matmul_to_embedding(nnssa):
# Look for 'MatMul' whose first input is 'OneHot'
# and replace it with embedding op
for fn_key in list(nnssa.functions.keys()):
f = nnssa.functions[fn_key]
keys = list(f.graph.keys())
for k in keys:
if k not in f.graph:
continue
current_node = f.graph[k]
if len(current_node.inputs) < 1:
continue
inp_node = f.graph[current_node.inputs[0]]
if (current_node.op == 'BatchMatMul' or current_node.op == 'MatMul') and inp_node.op == 'OneHot':
assert len(inp_node.inputs) == 4, 'OneHot node should have 4 inputs'
onehot_params = [f.graph[name].attr.get('value') for name in inp_node.inputs[1:]]
depth_val, on_val, off_val = [x.val[0] for x in onehot_params]
# Change the current node operation to Embedding
current_node.op = 'Embedding'
current_node.attr['depth'] = depth_val
current_node.attr['on_value'] = on_val
current_node.attr['off_value'] = off_val
# Replace OneHot with its first input
onehot_inp_node_names = inp_node.inputs[:]
replace_node(f.graph, inp_node.name, onehot_inp_node_names[0])
# Now delete the OneHot node and other input nodes
delete_node(f.graph, onehot_inp_node_names[1])
print('[Op Fusion] Node %s is removed.' %(onehot_inp_node_names[1]))
delete_node(f.graph, onehot_inp_node_names[2])
print('[Op Fusion] Node %s is removed.' %(onehot_inp_node_names[2]))
delete_node(f.graph, onehot_inp_node_names[3])
print('[Op Fusion] Node %s is removed.' %(onehot_inp_node_names[3]))
delete_node(f.graph, inp_node.name)
print('[Op Fusion] Node %s is removed.' %(inp_node.name))
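# Illustrative sketch of onehot_matmul_to_embedding (hypothetical node names):
#   before: indices -> OneHot(depth, on_value, off_value) -> MatMul(weights)
#   after:  indices -> Embedding(weights), with depth/on_value/off_value copied into the
#           Embedding node's attributes and the OneHot node plus its three scalar input
#           constants removed from the graph.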
def _search_nodes_by_type(gf, node_names, op_type):
for name in node_names:
if gf[name].op == op_type:
return gf[name]
def _match_layernorm_pattern(gf, entry_node):
""" Return the nodes that form the subgraph of a LayerNormalization layer
"""
def _axes_in_range(axes, rank):
return all([x in range(-rank, rank) for x in axes])
try:
params = {}
mean_1 = _search_nodes_by_type(gf, entry_node.outputs, 'Mean')
sqdiff_2 = _search_nodes_by_type(gf, entry_node.outputs, 'SquaredDifference')
mul_3 = _search_nodes_by_type(gf, entry_node.outputs, 'Mul')
if not (mean_1.op == 'Mean' and sqdiff_2.op == 'SquaredDifference' and
mul_3.op == 'Mul'):
return None
const_4 = gf[mean_1.inputs[1]]
mean_1_rank = len(mean_1.datatype.get_shape())
if not (const_4.op == 'Const' and len(const_4.value.val) == 1 and
_axes_in_range(const_4.value.val, mean_1_rank)):
return None
axes = const_4.value.val
mean_5 = gf[sqdiff_2.outputs[0]]
if not (mean_5.op == 'Mean'):
return None
const_6 = gf[mean_5.inputs[1]]
mean_5_rank = len(mean_5.datatype.get_shape())
if not (const_6.op == 'Const' and len(const_6.value.val) == 1 and
axes == const_6.value.val):
return None
axes = sorted([x if x >= 0 else mean_1_rank + x for x in
const_4.value.val])
ref_axes = list(range(mean_1_rank-len(axes), mean_1_rank))
if not all([x == y for (x,y) in zip(axes, ref_axes)]):
<filename>ev3sim/attach_bot.py
import multiprocessing
import importlib
from os import getcwd
from queue import Empty, Queue as NonMultiQueue
import sys
from time import sleep
from unittest import mock
from ev3sim.constants import *
from ev3dev2 import Device, DeviceNotFound
cur_events = NonMultiQueue()
tick = 0
tick_rate = 30
current_data = {}
last_checked_tick = -1
communications_messages = NonMultiQueue()
input_messages = NonMultiQueue()
def safe_patch(mname, cname, obj):
try:
getattr(importlib.import_module(mname), cname.split(".", 1)[0])
except Exception as e:
return lambda f: f
return mock.patch(f"{mname}.{cname}", obj)
def attach_bot(robot_id, filename, fake_roots, result_queue, result_queue_internal, rq, rq_internal, sq, sq_internal):
result_queue._internal_size = result_queue_internal
rq._internal_size = rq_internal
sq._internal_size = sq_internal
called_from = getcwd()
try:
sleep_builtin = sleep
def print_mock(*objects, sep=" ", end="\n"):
message = sep.join(str(obj) for obj in objects) + end
sq.put(
(
MESSAGE_PRINT,
{
"robot_id": robot_id,
"data": message,
},
)
)
def format_print_mock(*objects, alive_id=None, life=3, sep=" ", end="\n"):
message = sep.join(str(obj) for obj in objects) + end
sq.put(
(
MESSAGE_PRINT,
{
"robot_id": robot_id,
"data": message,
"kwargs": {
"alive_id": alive_id,
"life": life,
},
},
)
)
@mock.patch("builtins.print", print_mock)
@mock.patch("ev3sim.code_helpers.format_print", format_print_mock)
def run_code(fname, fake_roots, recv_q: multiprocessing.Queue, send_q: multiprocessing.Queue):
### TIMING FUNCTIONS
def handle_recv(msg_type, msg):
global tick, tick_rate, current_data, cur_events
if msg_type == SIM_DATA:
tick = msg["tick"]
tick_rate = msg["tick_rate"]
current_data = msg["data"]
if isinstance(current_data, str):
# Not pretty but it works.
e = Exception(current_data)
raise e
for ev in msg["events"]:
cur_events.put(ev)
return msg_type, msg
elif msg_type == SIM_INPUT:
input_messages.put((msg_type, msg))
else:
communications_messages.put((msg_type, msg))
def wait_for_tick():
recved = 0
msg_type = -1
msg = {}
while True:
try:
msg_type, msg = recv_q.get_nowait()
handle_recv(msg_type, msg)
if msg_type != SIM_DATA:
break
recved += 1
except Empty:
# Once we've exhausted the queue, and all of our information has been used, break and deal with the latest msg.
if recved > 0 and send_q.qsize() == 0:
break
sleep_builtin(0.01)
def get_time():
return tick / tick_rate
def sleep(seconds):
cur = get_time()
while True:
elapsed = get_time() - cur
if elapsed >= seconds:
return
wait_for_tick()
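# Illustrative timing sketch (assumed numbers, not from the source): with the default
# tick_rate of 30, get_time() returns tick / 30, so sleep(0.5) keeps calling
# wait_for_tick() until roughly 15 additional simulator ticks have been received.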
def wait_for_msg_of_type(MSG_TYPE):
while True:
try:
msg_type, msg = communications_messages.get_nowait()
if msg_type != MSG_TYPE:
communications_messages.put((msg_type, msg))
wait_for_tick()
else:
return msg
except Empty:
wait_for_tick()
def fake_input(message=None):
sq.put(
(
MESSAGE_INPUT_REQUESTED,
{
"robot_id": robot_id,
"message": str(message) if message is not None else None,
},
)
)
while True:
try:
_, msg = input_messages.get_nowait()
return msg
except Empty:
wait_for_tick()
### COMMUNICATIONS
class MockedCommSocket:
def __init__(self, hostaddr, port, sender_id):
self.hostaddr = hostaddr
self.port = str(port)
self.sender_id = sender_id
def send(self, d):
assert isinstance(d, str), "Can only send string data through simulator."
send_q.put(
(
SEND_DATA,
{
"robot_id": robot_id,
"send_to": self.sender_id,
"connection_string": f"{self.hostaddr}:{self.port}",
"data": d,
},
)
)
wait_for_msg_of_type(SEND_SUCCESS)
def recv(self, buffer):
# At the moment the buffer is ignored.
msg = wait_for_msg_of_type(RECV_DATA)
return msg["data"]
def close(self):
send_q.put(
(
CLOSE_CLIENT,
{
"robot_id": robot_id,
"connection_string": f"{self.hostaddr}:{self.port}",
},
)
)
msg = wait_for_msg_of_type(CLIENT_CLOSED)
class MockedCommClient(MockedCommSocket):
def __init__(self, hostaddr, port):
if hostaddr == "aa:bb:cc:dd:ee:ff":
print(
f"While this example will work, for competition bots please change the host address from {hostaddr} so competing bots can communicate separately."
)
send_q.put(
(
JOIN_CLIENT,
{
"robot_id": robot_id,
"connection_string": f"{hostaddr}:{port}",
},
)
)
msg = wait_for_msg_of_type(SUCCESS_CLIENT_CONNECTION)
sender_id = msg["host_id"]
print(f"Client connected to {sender_id}")
super().__init__(hostaddr, port, sender_id)
def close(self):
super().close()
class MockedCommServer:
def __init__(self, hostaddr, port):
if hostaddr == "aa:bb:cc:dd:ee:ff":
print(
f"While this example will work, for competition bots please change the host address from {hostaddr} so competing bots can communicate separately."
)
self.hostaddr = hostaddr
self.port = str(port)
send_q.put(
(
START_SERVER,
{
"connection_string": f"{self.hostaddr}:{self.port}",
"robot_id": robot_id,
},
)
)
wait_for_msg_of_type(SERVER_SUCCESS)
print(f"Server started on {self.hostaddr}:{self.port}")
self.sockets = []
def accept_client(self):
msg = wait_for_msg_of_type(NEW_CLIENT_CONNECTION)
self.sockets.append(MockedCommSocket(self.hostaddr, self.port, msg["client_id"]))
return self.sockets[-1], (self.hostaddr, self.port)
def close(self):
# Close all clients, then close myself
for socket in self.sockets:
socket.close()
send_q.put(
(
CLOSE_SERVER,
{
"robot_id": robot_id,
"connection_string": f"{self.hostaddr}:{self.port}",
},
)
)
msg = wait_for_msg_of_type(SERVER_CLOSED)
### CODE HELPERS
from ev3sim.code_helpers import CommandSystem
class MockCommandSystem(CommandSystem):
@classmethod
def send_command(cls, command_type, command_data):
send_q.put(
(
BOT_COMMAND,
{
"robot_id": robot_id,
"command_type": command_type,
"payload": command_data,
},
)
)
@classmethod
def handle_events(cls):
"""Since we can only handle events in mocked function calls, define a function to handle all of the existing events."""
while cur_events.qsize():
event_name, event_data = cur_events.get()
if hasattr(cls, event_name):
func = getattr(cls, event_name)
func(event_data)
fake_path = sys.path.copy()
fake_path.append(called_from)
fake_path = fake_roots + fake_path
### EV3DEV2 MOCKS
class MockedFile:
def __init__(self, data_path):
self.k2, self.k3, self.k4 = data_path
self.seek_point = 0
def read(self):
if isinstance(current_data[self.k2][self.k3][self.k4], int):
res = str(current_data[self.k2][self.k3][self.k4])
elif isinstance(current_data[self.k2][self.k3][self.k4], str):
if self.seek_point == 0:
res = current_data[self.k2][self.k3][self.k4]
else:
res = current_data[self.k2][self.k3][self.k4][self.seek_point :]
else:
raise ValueError(
f"Not sure how to handle datatype {type(current_data[self.k2][self.k3][self.k4])}"
)
return res.encode("utf-8")
def seek(self, i):
self.seek_point = i
def write(self, value):
send_q.put((DEVICE_WRITE, (f"{self.k2} {self.k3} {self.k4}", value.decode())))
while self.k4 == "mode" and current_data[self.k2][self.k3][self.k4] != value.decode():
wait_for_tick()
def flush(self):
pass
def device__init__(self, class_name, name_pattern="*", name_exact=False, **kwargs):
self._path = [class_name]
self.kwargs = kwargs
self._attr_cache = {}
def get_index(file):
match = Device._DEVICE_INDEX.match(file)
if match:
return int(match.group(1))
else:
return None
if name_exact:
self._path.append(name_pattern)
self._device_index = get_index(name_pattern)
else:
for name in current_data[self._path[0]].keys():
for k in kwargs:
if k not in current_data[self._path[0]][name]:
break
if isinstance(kwargs[k], list):
if current_data[self._path[0]][name][k] not in kwargs[k]:
break
else:
if current_data[self._path[0]][name][k] != kwargs[k]:
break
else:
self._path.append(name)
self._device_index = get_index(name)
break
else:
# Debug print for adding new devices.
# print(kwargs, data["current_data"][self._path[0]])
self._device_index = None
raise DeviceNotFound("%s is not connected." % self)
def _attribute_file_open(self, name):
return MockedFile((self._path[0], self._path[1], name))
def wait(self, cond, timeout=None):
tic = get_time()
if cond(self.state):
return True
# Register to active_data_handlers so we can do something every tick without lagging.
while True:
wait_for_tick()
res = cond(self.state)
if res or ((timeout is not None) and (get_time() >= tic + timeout / 1000)):
return cond(self.state)
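# Hypothetical example of the kwargs matching in device__init__ above (class and address
# names are assumptions): Device("lego-sensor", address="in1") scans
# current_data["lego-sensor"] and binds to the first entry whose recorded "address" is
# "in1" (list-valued kwargs accept any listed value); if nothing matches, DeviceNotFound
# is raised, mirroring the behaviour of a missing device on real hardware.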
class MockedButton:
class MockedButtonSpecific(Device):
_pressed = None
@property
def pressed(self):
self._pressed, value = self.get_attr_int(self._pressed, "pressed")
return value
button_names = ["up", "down", "left", "right", "enter", "backspace"]
on_up = None
on_down = None
on_left = None
on_right = None
on_enter = None
on_backspace = None
on_change = None
previous_presses = None
def __init__(self):
self.button_classes = {}
for name in self.button_names:
try:
self.button_classes[name] = MockedButton.MockedButtonSpecific("brick_button", address=name)
except Exception as e:
if name == "up":
raise e
self.button_classes[name] = None
@property
def buttons_pressed(self):
pressed = []
for name, obj in self.button_classes.items():
if obj is not None and obj.pressed:
pressed.append(name)
return pressed
@property
def up(self):
if self.button_classes["up"] is None:
raise ValueError("Up button not connected.")
return "up" in self.buttons_pressed
@property
def down(self):
if self.button_classes["down"] is None:
raise ValueError("Down button not connected.")
return "down" in self.buttons_pressed
@property
def left(self):
if self.button_classes["left"] is None:
raise ValueError("Left button not connected.")
return "left" in self.buttons_pressed
@property
def right(self):
if self.button_classes["right"] is None:
raise ValueError("Right button not connected.")
return "right" in self.buttons_pressed
@property
def enter(self):
if self.button_classes["enter"] is None:
raise ValueError("Enter button not connected.")
return "enter" in self.buttons_pressed
@property
def backspace(self):
if self.button_classes["backspace"] is None:
raise ValueError("Backspace button not connected.")
return "backspace" in self.buttons_pressed
def process(self, new_state=None):
if new_state is None:
new_state = set(self.buttons_pressed)
if self.previous_presses is None:
self.previous_presses = new_state
changed_names = new_state.symmetric_difference(self.previous_presses)
for name in changed_names:
bound_method = getattr(self, f"on_{name}")
if bound_method is not None:
bound_method(name in new_state)
if self.on_change is not None and changed_names:
self.on_change([(name, name in new_state) for name in changed_names])
self.previous_presses = new_state
orig_import = __import__
def import_mock(name, *args):
if name in ("fcntl", "evdev"):
return mock.Mock()
return orig_import(name, *args)
def raiseEV3Error():
raise ValueError(
"This simulator is not compatible with ev3dev. Please use ev3dev2: https://pypi.org/project/python-ev3dev2/"
)
@safe_patch("time", "time", get_time)
@safe_patch("time", "sleep", sleep)
@safe_patch("ev3dev2.motor", "Motor.wait", wait)
@safe_patch("ev3dev2", "Device.__init__", device__init__)
@safe_patch("ev3dev2", "Device._attribute_file_open", _attribute_file_open)
@safe_patch("ev3dev2.button", "Button", MockedButton)
@safe_patch("ev3sim.code_helpers", "is_ev3", False)
@safe_patch("ev3sim.code_helpers", "is_sim", True)
@safe_patch("ev3sim.code_helpers", "robot_id", robot_id)
@safe_patch("ev3sim.code_helpers", "wait_for_tick", wait_for_tick)
@safe_patch("ev3sim.code_helpers", "CommServer", MockedCommServer)
@safe_patch("ev3sim.code_helpers", "CommClient", MockedCommClient)
@safe_patch("ev3sim.code_helpers", "wait_for_tick", wait_for_tick)
@safe_patch("builtins", "__import__", import_mock)
@safe_patch("ev3sim.code_helpers", "CommandSystem", MockCommandSystem)
@safe_patch("ev3sim.code_helpers", "EventSystem.handle_events", handle_events)
@safe_patch("sys", "path", fake_path)
@safe_patch("builtins", "input", fake_input)
# These ev3dev2 objects are not implemented in the sim.
@safe_patch("ev3dev2", "led", mock.Mock())
@safe_patch("ev3dev2.led", "Leds", mock.Mock())
@safe_patch("ev3dev2", "sound", mock.Mock())
@safe_patch("ev3dev2.sound", "Sound", mock.Mock())
@safe_patch("ev3dev2", "display", mock.Mock())
@safe_patch("ev3dev2.display", "Display", mock.Mock())
@safe_patch("ev3dev2", "console", mock.Mock())
@safe_patch("ev3dev2.console", "Console", mock.Mock())
# TODO: This should probably actually give reasonable values for voltage/current/amps
@safe_patch("ev3dev2", "power", mock.Mock())
@safe_patch("ev3dev2.power", "Power", mock.Mock())
@safe_patch("ev3dev2", "fonts", mock.Mock())
@safe_patch("ev3dev.core", "Device.__init__", raiseEV3Error)
def run_script(fname):
from importlib.machinery import SourceFileLoader
wait_for_tick()
module = SourceFileLoader("__main__", fname).load_module()
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
import shutil
import zipfile
from datetime import datetime
import platform
import matplotlib.pyplot as plt
def User_validation():
f=open("Validation/Validation.json","r")
past=json.loads(f.read())
f.close()
now=datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
time_past=datetime.strptime(past['Acceso']["Hora"], "%d/%m/%Y %H:%M")
timesince = now - time_past
Time_min= int(timesince.total_seconds() / 60)
bool_negate = Time_min<120
if not bool_negate:
past['Acceso'].update({"Estado":"Negado"})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
bool_aprove= past['Acceso']["Estado"]=="Aprovado"
if not bool_aprove:
colums= st.columns([1,2,1])
with colums[1]:
#st.image("Imagenes/Escudo_unal.png")
st.subheader("Ingrese el usuario y contraseña")
Usuario=st.text_input("Usuario")
Clave=st.text_input("Contraseña",type="password")
Users=["Gestor Comercial"]
bool_user = Usuario in Users
bool_clave = (Clave)==("1234")
bool_user_email = past['Acceso']["User"] == Usuario
bool_time2 = Time_min<1000
bool_1 = bool_time2 and bool_user_email
bool_2 = bool_user and bool_clave
if not bool_user_email and bool_2:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
if not bool_2:
if (Usuario != "") and (Clave!=""):
with colums[1]:
st.warning("Usuario o contraseña incorrectos.\n\n Por favor intente nuevamente.")
elif bool_2 and not bool_1:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
EMAIL_ADDRESS = '<EMAIL>'
EMAIL_PASSWORD = '<PASSWORD>'
try:
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'Acceso aplicacion Julia'
body = 'Acceso usuario ' + Usuario +' el '+dt_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
except:
pass
with colums[1]:
st.button("Acceder a la aplicación")
elif bool_2:
past['Acceso'].update({"Estado":"Aprovado","Hora":dt_string,"User":Usuario})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
with colums[1]:
st.button("Acceder a la aplicación")
return bool_aprove
def Num_dias(leng):
if leng==1:
return "1 día"
else:
return str(leng) + " días"
def day_week(dia):
if dia ==0:
Dia="Lunes"
elif dia ==1:
Dia="Martes"
elif dia ==2:
Dia="Miércoles"
elif dia ==3:
Dia="Jueves"
elif dia ==4:
Dia="Viernes"
elif dia ==5:
Dia="Sábado"
elif dia ==6:
Dia="Domingo-Festivo"
return Dia
def remove_row(table, row):
tbl = table._tbl
tr = row._tr
tbl.remove(tr)
def Range_fecha(dates):
if len(dates)==1:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')
else:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')+" hasta "+ pd.to_datetime(dates[-1]).strftime('%Y-%m-%d')
def any2str(obj):
if isinstance(obj, str):
return obj
elif math.isnan(obj):
return ""
elif isinstance(obj, int):
return str(obj)
elif isinstance(obj, float):
return str(obj)
def dt_fechas(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["Fecha"]== dia]
data_dia_todos=data[data["Fecha"]==dia]
try:
d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_2(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
data_dia_todos=data[data["FECHA"]==dia]
try:
d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_3(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
try:
d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_fecha["CANTIDAD"].sum(),data_fecha["P NETO"].sum(),round(data_fecha["TRM"].mean(),2),round(data_fecha["PRECIO PONDERADO"].mean(),2)]],
columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def Mes_espa(mes):
if mes =="01":
Mes="Enero"
elif mes =="02":
Mes="Febrero"
elif mes =="03":
Mes="Marzo"
elif mes =="04":
Mes="Abril"
elif mes =="05":
Mes="Mayo"
elif mes =="06":
Mes="Junio"
elif mes =="07":
Mes="Julio"
elif mes =="08":
Mes="Agosto"
elif mes =="09":
Mes="Septiembre"
elif mes =="10":
Mes="Octubre"
elif mes =="11":
Mes="Noviembre"
elif mes =="12":
Mes="Diciembre"
return Mes
def F_Liq_pag(mes,ano):
if mes%12 ==1:
Fecha ="Enero"
elif mes%12 ==2:
Fecha ="Febrero"
elif mes%12 ==3:
Fecha ="Marzo"
elif mes%12 ==4:
Fecha ="Abril"
elif mes%12 ==5:
Fecha ="Mayo"
elif mes%12 ==6:
Fecha ="Junio"
elif mes%12 ==7:
Fecha ="Julio"
elif mes%12 ==8:
Fecha="Agosto"
elif mes%12 ==9:
Fecha="Septiembre"
elif mes%12 ==10:
Fecha="Octubre"
elif mes%12 ==11:
Fecha="Noviembre"
elif mes%12 ==0:
Fecha="Diciembre"
if mes > 12:
Fecha += " "+ str(ano+1)
else:
Fecha += " "+ str(ano)
return Fecha
def num2money(num):
if num < 1e3:
return str(round(num,2))
elif num < 1e6:
return str(round(num*1e3/1e6,2))+ " miles."
elif num < 1e9:
return str(round(num*1e3/1e9,2))+ " mill."
elif num < 1e12:
return str(round(num*1e3/1e12,2))+ " mil mill."
def mes_espa(mes):
if mes =="01":
Mes="enero"
elif mes =="02":
Mes="febrero"
elif mes =="03":
Mes="marzo"
elif mes =="04":
Mes="abril"
elif mes =="05":
Mes="mayo"
elif mes =="06":
Mes="junio"
elif mes =="07":
Mes="julio"
elif mes =="08":
Mes="agosto"
elif mes =="09":
Mes="septiembre"
elif mes =="10":
Mes="octubre"
elif mes =="11":
Mes="noviembre"
elif mes =="12":
Mes="diciembre"
return Mes
def mes_num(mes):
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
if mes == Opciones2[0]:
Mes="01"
elif mes == Opciones2[1]:
Mes="02"
elif mes == Opciones2[2]:
Mes="03"
elif mes == Opciones2[3]:
Mes="04"
elif mes == Opciones2[4]:
Mes="05"
elif mes == Opciones2[5]:
Mes="06"
elif mes == Opciones2[6]:
Mes="07"
elif mes == Opciones2[7]:
Mes="08"
elif mes == Opciones2[8]:
Mes="09"
elif mes == Opciones2[9]:
Mes="10"
elif mes == Opciones2[10]:
Mes="11"
elif mes == Opciones2[11]:
Mes="12"
return Mes
def dia_esp(dia):
if dia =="01":
Dia="1"
elif dia =="02":
Dia="2"
elif dia =="03":
Dia="3"
elif dia =="04":
Dia="4"
elif dia =="05":
Dia="5"
elif dia =="06":
Dia="6"
elif dia =="07":
Dia="7"
elif dia =="08":
Dia="8"
elif dia =="09":
Dia="9"
else :
Dia = dia
return Dia
def set_font(rows,fila,col,size):
run=rows[fila].cells[col].paragraphs[0].runs
font = run[0].font
font.size= Pt(size)
font.name = 'Tahoma'
def replace_text_for_image(paragraph, key, value,wid,hei):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, "")
for val in value:
r = paragraph.add_run()
r.add_picture(val,width=Cm(wid), height=Cm(hei))
def replace_text_in_paragraph(paragraph, key, value):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, value)
def delete_columns(table, columns):
# sort columns descending
columns.sort(reverse=True)
grid = table._tbl.find("w:tblGrid", table._tbl.nsmap)
for ci in columns:
for cell in table.column_cells(ci):
cell._tc.getparent().remove(cell._tc)
# Delete column reference.
col_elem = grid[ci]
grid.remove(col_elem)
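# Usage sketch for delete_columns (python-docx does not expose column deletion directly,
# so the helper edits the underlying XML): delete_columns(doc.tables[0], [3, 1]) removes
# the fourth and second columns; the reverse sort keeps earlier deletions from shifting
# the remaining indices.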
st.set_page_config(
layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
page_title="JULIA RD", # String or None. Strings get appended with "• Streamlit".
page_icon="📊", # String, anything supported by st.image, or None.
)
if User_validation():
#if True:
Opciones1=("Oferta Firme de Respaldo","Certificado de Reintegros","Informe Comercial")
eleccion=st.sidebar.selectbox('Seleccione el proyecto',Opciones1)
#if False:
if eleccion==Opciones1[0]:
st.header("Creación ofertas firmes de respaldo")
st.subheader("Introducción de los documentos")
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el consolidado base")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data=pd.read_excel(uploaded_file_1)
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
except:
st.warning("Recuerde que el formato del Excel tiene que ser xls")
data["Fecha"]=data["FECHAINI"].dt.to_pydatetime()
if data["USUARIO"].isnull().values.any():
st.warning("Revisar archivo de consolidado base, usuario no encontrado.")
data.dropna(subset = ["USUARIO"], inplace=True)
Users=pd.unique(data["USUARIO"])
else:
Users=pd.unique(data["USUARIO"])
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
template_file_path = uploaded_file_2
today = date.today()
fecha=dia_esp(today.strftime("%d")) +" de "+ mes_espa(today.strftime("%m")) +" de "+ today.strftime("%Y")
colums= st.columns([1,4,1])
with colums[1]:
st.subheader("Introducción de las variables")
P_bolsa=st.text_input("Introduzca el Precio de Escasez de Activación",value="10.00")
P_contrato=st.text_input("Introduzca el precio del contrato [USD]",value="10.00")
P_TMR=st.text_input("Introduzca el valor de la TRM",value="3,950.00")
F_TRM = st.date_input("Seleccione la fecha del valor de la TRM:",value=today).strftime("%Y-%m-%d")
Agente_extra = st.text_input("Introduzca el nombre particular del agente")
columns_2 = st.columns([1,2,2,1])
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
Opciones3=("I","II","III","IV","V")
with columns_2[1]:
eleccion2=st.selectbox('Seleccione el mes de la OFR',Opciones2)
with columns_2[2]:
eleccion3=st.selectbox('Selecciona la semana de la OFR',Opciones3)
if Agente_extra:
Agente_extra="_"+Agente_extra
else:
Agente_extra=""
columns_3 = st.columns([2,1,2])
with columns_3[1]:
if platform.system()=='Windows':
b=st.checkbox("PDF")
else:
b=False
a=st.button("Crear los documentos")
Ruta="Documentos/OFR/"+str(today.year)+"/"+mes_num(eleccion2)+"-"+eleccion2 +"/"+ eleccion3
Ruta_x="Documentos_exportar/"
if os.path.exists(Ruta_x):
shutil.rmtree(Ruta_x)
Ruta_x=Ruta_x+"/"
Ruta_x=Ruta_x+"/"
os.makedirs(Ruta_x, exist_ok=True)
if a:
try:
path1 = os.path.join(Ruta)
shutil.rmtree(path1)
os.makedirs(Ruta, exist_ok=True)
except:
os.makedirs(Ruta, exist_ok=True)
Ruta_word=Ruta+"/Word"
Ruta_pdf=Ruta+"/PDF"
Info ={"Ruta": Ruta,
"File_names": None
}
File_names=[]
os.makedirs(Ruta_word, exist_ok=True)
if b:
os.makedirs(Ruta_pdf, exist_ok=True)
zf = zipfile.ZipFile(
"Resultado.zip", "w", zipfile.ZIP_DEFLATED)
my_bar=st.progress(0)
steps=len(Users)
steps_done=0
for usuario in Users:
data_user=data.copy()
data_user=data_user[data_user["USUARIO"]==usuario]
Empresas = pd.unique(data_user["agente1"])
Respaldo = data[data["USUARIO"]== usuario]["CANTIDAD"].sum()
Fechas = pd.unique(data_user["Fecha"])
R_fechas = Range_fecha(Fechas)
Data_frame_fechas=dt_fechas(data.copy(),data_user,Fechas,Tipo_dia)
try:
Email = str(Extras[Extras["USUARIO"] == usuario]["CORREO"].values)
Porc_come = Extras[Extras["USUARIO"] == usuario]["MARGEN"].values[0]
except:
Email = ""
Porc_come = 0.1
st.warning("No hay | |
might thus contain extra 0s to make it
to the wanted length.
Remark: The cardinal is set to its actual value by this constructor.
WARNING: This does not check the validity of the provided string. This might
create unexpected behaviour.
Args:
bstr (str): The binary string used to encode an element, must be made
only of 0s and 1s.
bigendian (bool): A boolean to indicate if the given binary string must
be read as big endian or little endian.
Returns:
DiscreteElement -- A new element corresponding to the provided binary string.
Raises:
ValueError: If the given string is not composed only of 0s and 1s.
"""
size = len(bstr)
result = cls(size)
result._card = -1
#Trivial cases:
if '1' not in bstr:
result._card = 0
result._number = 0
result._size = size
return result
if '0' not in bstr:
result._card = size
result._number = (1 << size) - 1
result._size = size
return result
result._card = bstr.count('1')
if bigendian:
result._number = int(bstr, 2)
else:
result._number = int(bstr[::-1], 2) #Reverse string
return result
################################################################################
################################################################################
################################################################################
# ***********
# Properties:
# ***********
@property
def cardinal(self):
"""
Gets the cardinal of the current element (the number
of possible states it is composed of).
Returns:
int -- The cardinal of the element.
"""
if self._card != -1:
return self._card
self._card = 0
n = self._number
while n != 0:
if n & 1 == 1:
self._card += 1
n >>= 1
return self._card
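# Illustrative sketch (not part of the source): the loop above is a plain population count
# on the backing integer.
# >>> e = DiscreteElement(4, 0b1011)
# >>> e.cardinal   # three bits set -> three states in the element
# 3
# (equivalent to bin(e._number).count('1'))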
################################################################################
@property
def size(self):
"""
Gets the size of the frame of discernment on which the element is defined.
Returns:
int -- The size of the frame of discernment on which the element
is defined.
"""
return self._size
################################################################################
################################################################################
################################################################################
# **********************
# Set-theoretic methods:
# **********************
def opposite(self):
"""
Gets the opposite of the current element.
Returns:
DiscreteElement -- A new element which is the opposite
of the current one.
"""
result = DiscreteElement(self._size, (1 << self._size) - 1 - self._number)
if self._card != -1:
result._card = self._size - self._card
return result
################################################################################
@check_elements_compatibility
def conjunction(self, element):
"""
Gets the conjunction/intersection of the current element with the
given one.
Equivalent to ``self.intersection(element)``.
Args:
element (Element): The element with which conjunction/intersection
is requested.
Returns:
Element -- A new element which is the conjunction/intersection
of the current element with the given one.
Raises:
IncompatibleElementsError: If the current element and the given
one are not compatible (typically, there not defined on the same
frame of discernment, with exceptions for the complete set and the
empty set).
"""
return DiscreteElement(self._size, self._number & element._number)
################################################################################
def conjunction_unsafe(self, element):
"""
Gets the conjunction/intersection of the current element with the
given one.
Equivalent to ``self.intersection_unsafe(element)``.
WARNING: Does not check elements compatibility. This might create
unexpected behaviour.
Args:
element (Element): The element with which conjunction/intersection
is requested.
Returns:
Element -- A new element which is the conjunction/intersection
of the current element with the given one.
"""
return DiscreteElement.factory_constructor_unsafe(self._size,
self._number & element._number)
################################################################################
@check_elements_compatibility
def disjunction(self, element):
"""
Gets the disjunction/union of the current element with the
given one.
Equivalent to ``self.union(element)``.
Args:
element (Element): The element with which disjunction/union
is requested.
Returns:
Element -- A new element which is the disjunction/union
of the current element with the given one.
Raises:
IncompatibleElementsError: If the current element and the given
one are not compatible (typically, they are not defined on the same
frame of discernment, with exceptions for the complete set and the
empty set).
"""
return DiscreteElement(self._size, self._number | element._number)
################################################################################
def disjunction_unsafe(self, element):
"""
Gets the disjunction/union of the current element with the
given one.
Equivalent to ``self.union_unsafe(element)``.
WARNING: Does not check elements compatibility. This might create
unexpected behaviour.
Args:
element (Element): The element with which disjunction/union
is requested.
Returns:
Element -- A new element which is the disjunction/union
of the current element with the given one.
"""
return DiscreteElement.factory_constructor_unsafe(self._size,
self._number | element._number)
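# Illustrative sketch (not part of the source): both operations reduce to one bit-op on the
# backing integers.
# >>> a, b = DiscreteElement(4, 0b0011), DiscreteElement(4, 0b0110)
# >>> str(a.conjunction(b)), str(a.disjunction(b))
# ('0010', '0111')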
################################################################################
def get_compatible_empty_element(self):
"""
Gets the empty element compatible with the current element.
Returns:
Element -- A new element, which is empty and compatible
with the current element.
"""
return DiscreteElement.get_empty_element(self._size)
################################################################################
def get_compatible_complete_element(self):
"""
Gets the complete set as an element compatible with the current
one.
Returns:
Element -- A new element, which is the complete set of states
and compatible with the current element.
"""
return DiscreteElement.get_complete_element(self._size)
################################################################################
def is_empty(self):
"""
Checks if the current element is the empty set.
Returns:
bool -- ``True`` if the current element is empty,
``False`` otherwise.
"""
return self._number == 0
################################################################################
def is_complete(self):
"""
Checks if the current element corresponds to the complete set.
Returns:
bool -- ``True`` if the current element corresponds to the
complete set, ``False`` otherwise.
"""
return self._number == (1 << self._size) - 1
################################################################################
################################################################################
################################################################################
# ****************
# Utility methods:
# ****************
def is_compatible(self, element):
"""
Checks if the current element and the given one are compatible
(to perform set-theoretic operations) or not.
Args:
element (Element): The element to check compatibility with.
Returns:
bool -- ``True`` if both elements are compatible, ``False``
otherwise.
"""
if not isinstance(element, DiscreteElement):
return False
return self._size == element._size
################################################################################
def equals(self, element):
"""
Checks if the current element and the given one are equal.
Args:
element (Element): The element to compare to.
Returns:
bool -- ``True`` if both elements are equal, ``False``
otherwise.
"""
if not self.is_compatible(element):
return False
return self._number == element._number
################################################################################
def formatted_str(self, *references):
"""
Provides a formatted string representing the element given a list of references.
References should be given in little-endian order
(the first reference corresponds to the first, least significant, bit).
Args:
references (*object): A list of objects representing the "real" states (they
should support ``==`` and ``str()``).
Returns:
str -- Returns a string of the form `{state1 u state2 u ...}`.
Raises:
ValueError: If the reference list contains the same reference multiple times.
IncompatibleReferencesError: If the reference list seems to be incompatible
with the current element (typically if they differ in size).
"""
if self._size != len(references):
raise IncompatibleReferencesError(self, references)
for i in range(len(references)):
for j in range(len(references)):
if i != j and references[i] == references[j]:
raise ValueError(
"references: " + str(references) + "\n" +
"The list of references should not contain duplicates!"
)
result = ""
first = True
n = self._number
i = 0
while n != 0:
if n & 1 == 1:
if first:
result = str(references[i])
first = False
else:
result += " u " + str(references[i])
n >>= 1
i += 1
return "{" + result + "}"
################################################################################
################################################################################
################################################################################
# ******************************
# Overriding built-in functions:
# ******************************
def __hash__(self):
"""
Overrides ``hash()``, necessary to enable elements to be used
as dictionary keys.
WARNING: DiscreteElements with different sizes but the same
value will have the same hash. Anyway, you shouldn't mix them
within the same dictionary, so it should be fine, but you're
warned.
Returns:
int -- The hash code of the current element.
"""
return self._number
################################################################################
def __str__(self):
"""
Overrides ``str()``. Gives a string representation of the current element in the
form of a binary string (big endian).
Can be used to create elements with ``factory_from_str(str(element), bigendian=True)``.
Returns:
str -- Returns a binary string representing the current element.
"""
b = bin(self._number)[2:]
return "0" * (self._size - len(b)) + b #To add the missing 0s.
################################################################################
################################################################################
################################################################################
# **************
# Class methods:
# **************
@staticmethod
def get_empty_element(size):
"""
Provides the empty element of the given size.
Args:
size (int): The size of the frame of discernment on which the element
should be defined.
Returns:
DiscreteElement -- A new element that is the empty set.
"""
return DiscreteElement(size)
################################################################################
@staticmethod
def get_complete_element(size):
"""
Provides the complete element of the given size.
Args:
size (int): | |
<gh_stars>0
# MIT License
#
# Copyright (c) 2019 oval-group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# initial version taken from https://github.com/oval-group/GNN_branching
import gurobipy as grb
import numpy as np
import torch
from torch import nn
from regretnet import ibp
from regretnet.mipcertify.modules import View, Flatten
from regretnet.mipcertify.network_linear_approximation import LinearizedNetwork
def strip_view(network_layers):
# Keep minimal logic here: strip only the trailing view/sparsemax layers that must be removed.
if isinstance(network_layers[-1], ibp.View):
return network_layers[:-1]
elif (isinstance(network_layers[-1], ibp.View_Cut) and
isinstance(network_layers[-2], ibp.Sparsemax) and
isinstance(network_layers[-3], ibp.View)):
return network_layers[:-3]
else:
return network_layers
class MIPNetwork:
def __init__(self, base_layers, payment_layers, allocation_layers, n_agents, n_items, fractional_payment=False):
'''
base_layers, payment_layers, allocation_layers: Lists of PyTorch layers containing only Linear/ReLU/MaxPool modules
'''
self.layers = base_layers
self.net = nn.Sequential(*base_layers)
self.n_agents = n_agents
self.n_items = n_items
self.payment_layers = payment_layers
self.payment_head = nn.Sequential(*payment_layers)
self.allocation_layers = allocation_layers
self.allocation_head = nn.Sequential(*allocation_layers)
# Initialize a LinearizedNetwork object to determine the lower and
# upper bounds at each layer.
self.lin_net = LinearizedNetwork(base_layers, payment_layers, strip_view(allocation_layers))
self.fractional_payment = fractional_payment
def solve(self, inp_domain, truthful_util, timeout=None):
'''
inp_domain: Tensor containing in each row the lower and upper bound
for the corresponding dimension
timeout : Maximum allowed time to run, if it is not None
Returns:
sat : boolean indicating whether the MIP is satisfiable.
solution: Feasible point if the MIP is satisfiable,
None otherwise.
'''
if self.lower_bounds[-1].min() > 0:
raise NotImplementedError("lower bounds don't yet reach all the way through")
print("Early stopping")
# The problem is infeasible, and we haven't set up the MIP
return (False, None, 0)
if timeout is not None:
self.model.setParam('TimeLimit', timeout)
if self.check_obj_value_callback:
raise NotImplementedError("this callback is not fixed yet")
def early_stop_cb(model, where):
if where == grb.GRB.Callback.MIP:
best_bound = model.cbGet(grb.GRB.Callback.MIP_OBJBND)
if best_bound > 0:
model.terminate()
if where == grb.GRB.Callback.MIPNODE:
nodeCount = model.cbGet(grb.GRB.Callback.MIPNODE_NODCNT)
if (nodeCount % 100) == 0:
print(f"Running Nb states visited: {nodeCount}")
if where == grb.GRB.Callback.MIPSOL:
obj = model.cbGet(grb.GRB.Callback.MIPSOL_OBJ)
if obj < 0:
# Does it have a chance at being a valid
# counter-example?
# Check it with the network
input_vals = model.cbGetSolution(self.gurobi_vars[0])
with torch.no_grad():
if isinstance(input_vals, list):
inps = torch.Tensor(input_vals).view(1, -1)
else:
assert isinstance(input_vals, grb.tupledict)
inps = torch.Tensor([val for val in input_vals.values()])
inps = inps.view((1,) + self.lower_bounds[0].shape)
out = self.net(inps).squeeze()
# In case there are several outputs from the network, get the minimum one.
out = out.min().item()
if out < 0:
model.terminate()
else:
def early_stop_cb(model, where):
if where == grb.GRB.Callback.MIPNODE:
nodeCount = model.cbGet(grb.GRB.Callback.MIPNODE_NODCNT)
if (nodeCount % 100) == 0:
print(f"Running Nb states visited: {nodeCount}")
self.model.optimize(early_stop_cb)
nb_visited_states = self.model.nodeCount
if self.model.status == grb.GRB.INFEASIBLE:
# Infeasible: No solution
print("Infeasible.")
return (False, None, nb_visited_states)
elif self.model.status == grb.GRB.OPTIMAL:
print("Optimal.")
# There is a feasible solution. Return the feasible solution as well.
len_inp = len(self.gurobi_vars[0])
# Get the input that gives the feasible solution.
#input_vals = self.model.cbGetSolution(self.gurobi_vars[0])
#inps = torch.Tensor([val for val in input_vals.values()])
#inps = inps.view((1,) + self.lower_bounds[0].shape)
optim_val = self.final_util_expr.getValue()
return ((truthful_util - optim_val) < 0, (None, optim_val), nb_visited_states)
elif self.model.status == grb.GRB.INTERRUPTED:
print("Interrupted.")
obj_bound = self.model.ObjBound
if obj_bound > 0:
return (False, None, nb_visited_states)
else:
# There is a feasible solution. Return the feasible solution as well.
len_inp = len(self.gurobi_vars[0])
# Get the input that gives the feasible solution.
inp = torch.Tensor(len_inp)
if isinstance(self.gurobi_vars[0], list):
for idx, var in enumerate(self.gurobi_vars[0]):
inp[idx] = var.x
else:
#assert isinstance(self.gurobi_vars[0], grb.tupledict)
inp = torch.zeros_like(self.lower_bounds[0])
for idx, var in self.gurobi_vars[0].items():
inp[idx] = var.x
optim_val = self.final_util_expr.getValue()
return ((truthful_util - optim_val) < 0, (inp, optim_val), nb_visited_states)
elif self.model.status == grb.GRB.TIME_LIMIT:
# We timed out, return a None Status
return (None, None, nb_visited_states)
else:
raise Exception("Unexpected Status code")
def tune(self, param_outfile, tune_timeout):
self.model.Params.tuneOutput = 1
self.model.Params.tuneTimeLimit = tune_timeout
self.model.tune()
# Get the best set of parameters
self.model.getTuneResult(0)
self.model.write(param_outfile)
def do_interval_analysis(self, inp_domain):
self.lower_bounds = []
self.upper_bounds = []
self.lower_bounds.append(inp_domain.select(-1, 0))
self.upper_bounds.append(inp_domain.select(-1, 1))
layer_idx = 1
current_lb = self.lower_bounds[-1]
current_ub = self.upper_bounds[-1]
for layer in self.layers:
if isinstance(layer, nn.Linear):
pos_weights = torch.clamp(layer.weight, min=0)
neg_weights = torch.clamp(layer.weight, max=0)
new_layer_lb = torch.mv(pos_weights, current_lb) + \
torch.mv(neg_weights, current_ub) + \
layer.bias
new_layer_ub = torch.mv(pos_weights, current_ub) + \
torch.mv(neg_weights, current_lb) + \
layer.bias
self.lower_bounds.append(new_layer_lb)
self.upper_bounds.append(new_layer_ub)
current_lb = new_layer_lb
current_ub = new_layer_ub
elif isinstance(layer, nn.ReLU):
current_lb = torch.clamp(current_lb, min=0)
current_ub = torch.clamp(current_ub, min=0)
elif type(layer) == View:
continue
elif type(layer) == Flatten:
current_lb = current_lb.view(-1)
current_ub = current_ub.view(-1)
else:
raise NotImplementedError
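# Standalone sketch of the interval rule applied above to a single nn.Linear layer
# (values are made up; sound bounds come from routing positive weights to the
# same-side bound and negative weights to the opposite bound):
#   W = torch.tensor([[1.0, -2.0]]); b = torch.tensor([0.5])
#   lb, ub = torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])
#   W_pos, W_neg = W.clamp(min=0), W.clamp(max=0)
#   new_lb = W_pos @ lb + W_neg @ ub + b   # tensor([-1.5])
#   new_ub = W_pos @ ub + W_neg @ lb + b   # tensor([ 1.5])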
def setup_model(self, inp_domain,
truthful_input,
truthful_util,
regret_tolerance=0.0001,
use_obj_function=False,
bounds="opt",
parameter_file=None,
player_ind=None):
'''
inp_domain: Tensor containing in each row the lower and upper bound
for the corresponding dimension
use_obj_function: If False, don't use any objective function; simply add a constraint on the output
If True, perform optimization and use callback to interrupt the solving when a
counterexample is found
bounds: string, indicate what type of method should be used to get the intermediate bounds
parameter_file: Load a set of parameters for the MIP solver if a path is given.
Setup the model to be optimized by Gurobi
'''
if player_ind is None:
assert self.n_agents == 1, "Must specify player_ind for >1 agent"
self.player_ind = 0
else:
self.player_ind = player_ind
if bounds == "opt":
# First use define_linear_approximation from LinearizedNetwork to
# compute upper and lower bounds to be able to define Ms
self.lin_net.define_linear_approximation(inp_domain)
self.lower_bounds = list(map(torch.Tensor, self.lin_net.lower_bounds))
self.upper_bounds = list(map(torch.Tensor, self.lin_net.upper_bounds))
self.payment_lower_bounds = list(map(torch.Tensor, self.lin_net.payment_lower_bounds))
self.payment_upper_bounds = list(map(torch.Tensor, self.lin_net.payment_upper_bounds))
self.allocation_lower_bounds = list(map(torch.Tensor, self.lin_net.allocation_lower_bounds))
self.allocation_upper_bounds = list(map(torch.Tensor, self.lin_net.allocation_upper_bounds))
elif bounds == "interval":
raise NotImplementedError("interval stuff is currently not working")
self.do_interval_analysis(inp_domain)
if self.lower_bounds[-1][0] > 0:
# The problem is already guaranteed to be infeasible,
# Let's not waste time setting up the MIP
return
else:
raise NotImplementedError("Unknown bound computation method.")
self.gurobi_vars = []
self.model = grb.Model()
self.model.setParam('OutputFlag', False)
self.model.setParam('Threads', 1)
self.model.setParam('DualReductions', 0)
if parameter_file is not None:
self.model.read(parameter_file)
self.zero_var = self.model.addVar(lb=0, ub=0, obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'zero')
# First add the input variables as Gurobi variables.
if inp_domain.dim() == 2:
inp_gurobi_vars = self.model.addVars([i for i in range(inp_domain.numel() // 2)],
lb=self.lower_bounds[0],
ub=self.upper_bounds[0],
name='inp')
inp_gurobi_vars = [var for key, var in inp_gurobi_vars.items()]
else:
raise Exception(f"input shape is {inp_domain.shape} but it should be upper and lower bounds for a flat linear input (i.e. N x 2)")
self.gurobi_vars.append(inp_gurobi_vars)
self.construct_model_layers(self.gurobi_vars, self.layers, self.lower_bounds, self.upper_bounds, var_name_str='trunk')
self.payment_gurobi_vars = []
self.payment_gurobi_vars.append(self.gurobi_vars[-1]) # the inputs to payment are the final ReLUs of trunk
self.construct_model_layers(self.payment_gurobi_vars, self.payment_layers, self.payment_lower_bounds, self.payment_upper_bounds, var_name_str='payment')
self.allocation_gurobi_vars = []
self.allocation_gurobi_vars.append(self.gurobi_vars[-1])
self.construct_model_layers(self.allocation_gurobi_vars, self.allocation_layers, self.allocation_lower_bounds, self.allocation_upper_bounds, var_name_str='allocation')
final_alloc = self.allocation_gurobi_vars[-1]
final_player_alloc = final_alloc[self.player_ind, :]
if not self.fractional_payment:
self.final_player_payment = self.payment_gurobi_vars[-1][self.player_ind]
else:
shaped_input_vars = np.reshape(np.array(self.gurobi_vars[0]), (self.n_agents, self.n_items))
player_input_val = shaped_input_vars[self.player_ind, :]
frac_payment = self.payment_gurobi_vars[-1][self.player_ind]
alloc_value_expr = grb.quicksum(player_input_val[i]*final_player_alloc[i] for i in range(self.n_items))
alloc_value = self.model.addVar(name='player_alloc_value')
self.model.addConstr(alloc_value == alloc_value_expr)
self.final_player_payment = frac_payment*alloc_value
self.model.setParam("NonConvex", 2) # needed for quadratic equality constraints (Gurobi 9.0 only)
player_truthful_input = truthful_input[self.player_ind, :]
self.final_util_expr = grb.LinExpr(player_truthful_input, final_player_alloc) - self.final_player_payment
if not use_obj_function:
self.model.addConstr(self.final_util_expr >= (truthful_util + regret_tolerance))
self.model.setObjective(0, grb.GRB.MAXIMIZE)
self.check_obj_value_callback = False
else:
# maximize the final utility
self.model.setObjective(self.final_util_expr, grb.GRB.MAXIMIZE)
# TODO set this to True and fix the callback code
# it's not clear that we actually want to do callbacks for this case though
# for our application we may want to find the worst violation, as opposed to | |
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 | |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from contextlib import contextmanager
import collections
import uuid
import inspect
import cachetools
import numpy as np
from builtins import object
from functools import wraps
from collections import defaultdict
import abc
from future.utils import with_metaclass
from neon.op_graph.axes import TensorDescription, \
make_axis, make_axes, Axes, FlattenedAxis, slice_axis, default_dtype, \
default_int_dtype, AxesMap, UnmatchedAxesError
from neon.util.names import ScopedNameableValue
from neon.util.threadstate import get_thread_state
from orderedset import OrderedSet
from cached_property import cached_property
def tensor_descriptions(args):
"""
A list of tensor descriptions for Ops.
Arguments:
args: A list of Ops.
Returns:
A generator yielding the tensor description of each Op.
"""
return (arg.tensor_description() for arg in args)
def tdcache():
"""
Decorator to mark tensor description method as cached.
Returns:
Cache decorator set to use a particular cache.
"""
return cachetools.cached(cache=tdcache.tensor_description_cache)
tdcache.tensor_description_cache = {}
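# Usage sketch (hypothetical method; grounded in the shared dict above, which backs
# every decorated tensor_description so all cached descriptions can be cleared in one place):
#   class SomeOp(Op):
#       @tdcache()
#       def tensor_description(self):
#           ...
#   tdcache.tensor_description_cache.clear()   # drop every cached description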
@contextmanager
def metadata(**metadata):
"""
Capture all Ops created within the context. Hides ops created in this
context from parent contexts.
"""
with Op.all_ops() as ops:
yield
for op in ops:
if isinstance(op, TensorValueOp):
# make sure tensorvalue op matches thing it reads from
op.metadata.update(op.value_tensor.metadata)
else:
op.metadata.update(metadata)
def with_op_metadata(f, metadata=None):
"""
Decorator to add metadata to all ops created inside the decorated function.
If this decorator is applied to a method of a class with a class
variable `metadata` defined as a dictionary then we add that to the
op metadata to attach.
"""
metadata = metadata or dict()
assert isinstance(metadata, dict), "Metadata must be dict, not {}".format(type(metadata))
@wraps(f)
def wrapper(*args, **kwargs):
with Op.all_ops() as ops:
result = f(*args, **kwargs)
# If this decorator is applied to a method of a class with a class
# variable called `metadata` then we add that to the metadata to attach.
if len(args) > 0 and hasattr(type(args[0]), 'metadata'):
metadata.update(type(args[0]).metadata)
for op in ops:
op.metadata.update(metadata)
return result
return wrapper
class DebugInfo(object):
"""Mixin that captures file/line location of an object's creation."""
def __init__(self, **kwargs):
# TODO This is a good first cut for debugging info, but it would be nice to
# TODO be able to reliably walk the stack back to user code rather than just
# TODO back past this constructor
super(DebugInfo, self).__init__(**kwargs)
frame = None
try:
frame = inspect.currentframe()
while frame.f_locals.get('self', None) is self:
frame = frame.f_back
while frame:
filename, lineno, function, code_context, index = inspect.getframeinfo(
frame)
if -1 == filename.find('ngraph/op_graph'):
break
frame = frame.f_back
self.filename = filename
self.lineno = lineno
self.code_context = code_context
finally:
del frame
@property
def file_info(self):
"""
Return file location that created the node.
Returns:
String with file location that created the node.
"""
return 'File "{filename}", line {lineno}'.format(
filename=self.filename, lineno=self.lineno)
class Op(ScopedNameableValue):
"""
Any operation that can be in an AST.
Arguments:
args: Values used by this node.
const: The value of a constant Op, or None,
constant (bool): The Op is constant. Default False.
forward: If not None, the node to use instead of this node.
metadata: String key value dictionary for frontend metadata.
kwargs: Args defined in related classes.
Attributes:
const: The value of a constant.
constant (bool): The value is constant.
control_deps (OrderedSet): Ops in addition to args that must run before this op.
persistent (bool): The value will be retained from computation to computation and
not shared. Always True if reference is set.
metadata: Dictionary of string keys and values used for attaching
arbitrary metadata to nodes.
trainable: The value is trainable.
"""
# Default is to not collect Ops as they are created
@staticmethod
def _get_thread_ops():
"""
:return: The stack of Ops being collected.
"""
try:
ops = get_thread_state().ops
except AttributeError:
ops = [None]
get_thread_state().ops = ops
return ops
@staticmethod
def get_all_ops():
try:
all_ops = get_thread_state().all_ops
except AttributeError:
all_ops = [None]
get_thread_state().all_ops = all_ops
return all_ops
# We need to create another stack here because all_ops and captured_ops
# have different semantics that don't work with a shared stack
@staticmethod
@contextmanager
def all_ops(ops=None, isolate=False):
"""
Collects all Ops created within the context. Does not hide ops created
in this context from parent contexts unless isolate is True.
"""
if ops is None:
ops = []
try:
all_ops = Op.get_all_ops()
all_ops.append(ops)
yield (ops)
finally:
all_ops.pop()
parent = all_ops[-1]
if not isolate and parent is not None:
parent.extend(ops)
@staticmethod
def all_op_references(ops):
"""
Currently ops can have references to other ops anywhere in their __dict__, (not just args,
but the other typical places handled in serialization's `add_edges`). This function
iterates through an ops __dict__ attributes and tests if any of them are subclasses of
`Op`.
This is 'greedier' than the `ordered_ops` method which only traverses the graph using the
`args` and `control_deps` keys of an ops `__dict__`. In addition, the order of ops
returned by this method is not guaranteed to be in a valid linear execution ordering.
"""
op_set = OrderedSet()
frontier = OrderedSet(ops)
while frontier:
op = frontier.pop()
op_set.add(op)
for key in op.__dict__:
val = getattr(op, key)
if isinstance(val, Op) and val not in op_set:
frontier.add(val)
elif isinstance(val, dict):
for subkey in val:
if isinstance(val[subkey], Op) and val[subkey] not in op_set:
frontier.add(val[subkey])
elif isinstance(val, (list, tuple, set, OrderedSet)):
for item in val:
if isinstance(item, Op) and item not in op_set:
frontier.add(item)
return op_set
@staticmethod
def ordered_ops(roots):
"""
Topological sort of ops reachable from roots. Note that ngraph is
using dependency edges rather than dataflow edges, for example,
`top_sort(a -> b -> c) => [c, b, a]`.
Args:
roots: List of ops.
Returns:
A list of sorted ops.
"""
ordered_ops = []
available = OrderedSet()
counts = dict()
parents = defaultdict(OrderedSet)
ready = OrderedSet()
available.update(root.forwarded for root in roots)
while available:
node = available.pop()
if node in counts or node in ready:
continue
children = OrderedSet((child.forwarded for child in node.all_deps))
if children:
counts[node] = len(children)
for child in children:
parents[child].add(node)
available.update(children)
else:
ready.add(node)
while ready:
node = ready.pop()
ordered_ops.append(node)
for p in parents.get(node, []):
count = counts[p] - 1
if count == 0:
ready.add(p)
del counts[p]
else:
counts[p] = count
if len(counts) > 0:
raise ValueError("Graph not a DAG")
return ordered_ops
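# Illustrative trace of the child-counting (Kahn-style) sort above, using the
# dependency chain from the docstring, a -> b -> c (a depends on b, b on c):
# c has no dependencies so it becomes 'ready' first and is emitted, then b once
# its single child c has been emitted, then a, giving [c, b, a].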
@staticmethod
def visit_input_closure(roots, fun):
"""
Apply function `fun` to ops reachable from roots, in topologically sorted order.
Args:
roots: List of ops.
fun: Function applied to each op.
Returns:
None
"""
for op in Op.ordered_ops(roots):
fun(op)
def __init__(self,
args=(),
metadata=None,
const=None,
constant=False,
persistent=False,
trainable=False,
**kwargs):
super(Op, self).__init__(**kwargs)
self._args = None
self._set_args(as_op(arg) for arg in args)
self.metadata = dict()
if metadata is not None:
if not isinstance(metadata, dict):
raise ValueError("Metadata must be of type dict,"
"not {} of {}".format(type(metadata), metadata))
self.metadata.update(metadata)
# List to keep generation deterministic
self._control_deps = OrderedSet()
self._deriv_handler = None
self._const = const
self.uuid = uuid.uuid4()
self._is_constant = constant
self._is_persistent = persistent
self._is_trainable = trainable
# Add this op to the all op accounting lists
ops = Op._get_thread_ops()[-1]
if ops is not None:
ops.append(self)
all_ops = Op.get_all_ops()[-1]
if all_ops is not None:
all_ops.append(self)
self.style = {}
self._forward = None
def copy_with_new_args(self, args):
"""
This method creates a new op given an original op and new args. The purpose here
is to replace args for an op with layout conversions as needed but keep the op the same
otherwise.
"""
return (type(self))(*args)
def _set_args(self, args):
"""
Internal function. Changes args.
Args:
args: The new arguments.
"""
self._args = tuple(args)
self.invalidate_property_cache('all_deps')
self.invalidate_property_cache('call_info')
@property
def tensor(self):
"""
Deprecated. See effective_tensor_op.
Returns: The op providing the value.
"""
return self.forwarded
@property
def effective_tensor_op(self):
"""
The op that provides the value for this op.
For example, for a TensorValueOp, the op itself provides the value of the state,
while for a SequenceOp, the last op in the sequence will provide the | |
48790: "4360 4450 4545",
48791: "4360 4450 4546",
48792: "4360 4451",
48793: "4360 4451 4520",
48794: "4360 4451 4521",
48795: "4360 4451 4522",
48796: "4360 4451 4523",
48797: "4360 4451 4524",
48798: "4360 4451 4525",
48799: "4360 4451 4526",
48800: "4360 4451 4527",
48801: "4360 4451 4528",
48802: "4360 4451 4529",
48803: "4360 4451 4530",
48804: "4360 4451 4531",
48805: "4360 4451 4532",
48806: "4360 4451 4533",
48807: "4360 4451 4534",
48808: "4360 4451 4535",
48809: "4360 4451 4536",
48810: "4360 4451 4537",
48811: "4360 4451 4538",
48812: "4360 4451 4539",
48813: "4360 4451 4540",
48814: "4360 4451 4541",
48815: "4360 4451 4542",
48816: "4360 4451 4543",
48817: "4360 4451 4544",
48818: "4360 4451 4545",
48819: "4360 4451 4546",
48820: "4360 4452",
48821: "4360 4452 4520",
48822: "4360 4452 4521",
48823: "4360 4452 4522",
48824: "4360 4452 4523",
48825: "4360 4452 4524",
48826: "4360 4452 4525",
48827: "4360 4452 4526",
48828: "4360 4452 4527",
48829: "4360 4452 4528",
48830: "4360 4452 4529",
48831: "4360 4452 4530",
48832: "4360 4452 4531",
48833: "4360 4452 4532",
48834: "4360 4452 4533",
48835: "4360 4452 4534",
48836: "4360 4452 4535",
48837: "4360 4452 4536",
48838: "4360 4452 4537",
48839: "4360 4452 4538",
48840: "4360 4452 4539",
48841: "4360 4452 4540",
48842: "4360 4452 4541",
48843: "4360 4452 4542",
48844: "4360 4452 4543",
48845: "4360 4452 4544",
48846: "4360 4452 4545",
48847: "4360 4452 4546",
48848: "4360 4453",
48849: "4360 4453 4520",
48850: "4360 4453 4521",
48851: "4360 4453 4522",
48852: "4360 4453 4523",
48853: "4360 4453 4524",
48854: "4360 4453 4525",
48855: "4360 4453 4526",
48856: "4360 4453 4527",
48857: "4360 4453 4528",
48858: "4360 4453 4529",
48859: "4360 4453 4530",
48860: "4360 4453 4531",
48861: "4360 4453 4532",
48862: "4360 4453 4533",
48863: "4360 4453 4534",
48864: "4360 4453 4535",
48865: "4360 4453 4536",
48866: "4360 4453 4537",
48867: "4360 4453 4538",
48868: "4360 4453 4539",
48869: "4360 4453 4540",
48870: "4360 4453 4541",
48871: "4360 4453 4542",
48872: "4360 4453 4543",
48873: "4360 4453 4544",
48874: "4360 4453 4545",
48875: "4360 4453 4546",
48876: "4360 4454",
48877: "4360 4454 4520",
48878: "4360 4454 4521",
48879: "4360 4454 4522",
48880: "4360 4454 4523",
48881: "4360 4454 4524",
48882: "4360 4454 4525",
48883: "4360 4454 4526",
48884: "4360 4454 4527",
48885: "4360 4454 4528",
48886: "4360 4454 4529",
48887: "4360 4454 4530",
48888: "4360 4454 4531",
48889: "4360 4454 4532",
48890: "4360 4454 4533",
48891: "4360 4454 4534",
48892: "4360 4454 4535",
48893: "4360 4454 4536",
48894: "4360 4454 4537",
48895: "4360 4454 4538",
48896: "4360 4454 4539",
48897: "4360 4454 4540",
48898: "4360 4454 4541",
48899: "4360 4454 4542",
48900: "4360 4454 4543",
48901: "4360 4454 4544",
48902: "4360 4454 4545",
48903: "4360 4454 4546",
48904: "4360 4455",
48905: "4360 4455 4520",
48906: "4360 4455 4521",
48907: "4360 4455 4522",
48908: "4360 4455 4523",
48909: "4360 4455 4524",
48910: "4360 4455 4525",
48911: "4360 4455 4526",
48912: "4360 4455 4527",
48913: "4360 4455 4528",
48914: "4360 4455 4529",
48915: "4360 4455 4530",
48916: "4360 4455 4531",
48917: "4360 4455 4532",
48918: "4360 4455 4533",
48919: "4360 4455 4534",
48920: "4360 4455 4535",
48921: "4360 4455 4536",
48922: "4360 4455 4537",
48923: "4360 4455 4538",
48924: "4360 4455 4539",
48925: "4360 4455 4540",
48926: "4360 4455 4541",
48927: "4360 4455 4542",
48928: "4360 4455 4543",
48929: "4360 4455 4544",
48930: "4360 4455 4545",
48931: "4360 4455 4546",
48932: "4360 4456",
48933: "4360 4456 4520",
48934: "4360 4456 4521",
48935: "4360 4456 4522",
48936: "4360 4456 4523",
48937: "4360 4456 4524",
48938: "4360 4456 4525",
48939: "4360 4456 4526",
48940: "4360 4456 4527",
48941: "4360 4456 4528",
48942: "4360 4456 4529",
48943: "4360 4456 4530",
48944: "4360 4456 4531",
48945: "4360 4456 4532",
48946: "4360 4456 4533",
48947: "4360 4456 4534",
48948: "4360 4456 4535",
48949: "4360 4456 4536",
48950: "4360 4456 4537",
48951: "4360 4456 4538",
48952: "4360 4456 4539",
48953: "4360 4456 4540",
48954: "4360 4456 4541",
48955: "4360 4456 4542",
48956: "4360 4456 4543",
48957: "4360 4456 4544",
48958: "4360 4456 4545",
48959: "4360 4456 4546",
48960: "4360 4457",
48961: "4360 4457 4520",
48962: "4360 4457 4521",
48963: "4360 4457 4522",
48964: "4360 4457 4523",
48965: "4360 4457 4524",
48966: "4360 4457 4525",
48967: "4360 4457 4526",
48968: "4360 4457 4527",
48969: "4360 4457 4528",
48970: "4360 4457 4529",
48971: "4360 4457 4530",
48972: "4360 4457 4531",
48973: "4360 4457 4532",
48974: "4360 4457 4533",
48975: "4360 4457 4534",
48976: "4360 4457 4535",
48977: "4360 4457 4536",
48978: "4360 4457 4537",
48979: "4360 4457 4538",
48980: "4360 4457 4539",
48981: "4360 4457 4540",
48982: "4360 4457 4541",
48983: "4360 4457 4542",
48984: "4360 4457 4543",
48985: "4360 4457 4544",
48986: "4360 4457 4545",
48987: "4360 4457 4546",
48988: "4360 4458",
48989: "4360 4458 4520",
48990: "4360 4458 4521",
48991: "4360 4458 4522",
48992: "4360 4458 4523",
48993: "4360 4458 4524",
48994: "4360 4458 4525",
48995: "4360 4458 4526",
48996: "4360 4458 4527",
48997: "4360 4458 4528",
48998: "4360 4458 4529",
48999: "4360 4458 4530",
49000: "4360 4458 4531",
49001: "4360 4458 4532",
49002: "4360 4458 4533",
49003: "4360 4458 4534",
49004: "4360 4458 4535",
49005: "4360 4458 4536",
49006: "4360 4458 4537",
49007: "4360 4458 4538",
49008: "4360 4458 4539",
49009: "4360 4458 4540",
49010: "4360 4458 4541",
49011: "4360 4458 4542",
49012: "4360 4458 4543",
49013: "4360 4458 4544",
49014: "4360 4458 4545",
49015: "4360 4458 4546",
49016: "4360 4459",
49017: "4360 4459 4520",
49018: "4360 4459 4521",
49019: "4360 4459 4522",
49020: "4360 4459 4523",
49021: "4360 4459 4524",
49022: "4360 4459 4525",
49023: "4360 4459 4526",
49024: "4360 4459 4527",
49025: "4360 4459 4528",
49026: "4360 4459 4529",
49027: "4360 4459 4530",
49028: "4360 4459 4531",
49029: "4360 4459 4532",
49030: "4360 4459 4533",
49031: "4360 4459 4534",
49032: "4360 4459 4535",
49033: "4360 4459 4536",
49034: "4360 4459 4537",
49035: "4360 4459 4538",
49036: "4360 4459 4539",
49037: "4360 4459 4540",
49038: "4360 4459 4541",
49039: "4360 4459 4542",
49040: "4360 4459 4543",
49041: "4360 4459 4544",
49042: "4360 4459 4545",
49043: "4360 4459 4546",
49044: "4360 4460",
49045: "4360 4460 4520",
49046: "4360 4460 4521",
49047: "4360 4460 4522",
49048: "4360 4460 4523",
49049: "4360 4460 4524",
49050: "4360 4460 4525",
49051: "4360 4460 4526",
49052: "4360 4460 4527",
49053: "4360 4460 4528",
49054: "4360 4460 4529",
49055: "4360 4460 4530",
49056: "4360 4460 4531",
49057: "4360 4460 4532",
49058: "4360 4460 4533",
49059: "4360 4460 4534",
49060: "4360 4460 4535",
49061: "4360 4460 4536",
49062: "4360 4460 4537",
49063: "4360 4460 4538",
49064: "4360 4460 4539",
49065: "4360 4460 4540",
49066: "4360 4460 4541",
49067: "4360 4460 4542",
49068: "4360 4460 4543",
49069: "4360 4460 4544",
49070: "4360 4460 4545",
49071: "4360 4460 4546",
49072: "4360 4461",
49073: "4360 4461 4520",
49074: "4360 4461 4521",
49075: "4360 4461 4522",
49076: "4360 4461 4523",
49077: "4360 4461 4524",
49078: "4360 4461 4525",
49079: "4360 4461 4526",
49080: "4360 4461 4527",
49081: "4360 4461 4528",
49082: "4360 4461 4529",
49083: "4360 4461 4530",
49084: "4360 4461 4531",
49085: "4360 4461 4532",
49086: "4360 4461 4533",
49087: "4360 4461 4534",
49088: "4360 4461 4535",
49089: "4360 4461 4536",
49090: "4360 4461 4537",
49091: "4360 4461 4538",
49092: "4360 4461 4539",
49093: "4360 4461 4540",
49094: "4360 4461 4541",
49095: "4360 4461 4542",
49096: "4360 4461 4543",
49097: "4360 4461 4544",
49098: "4360 4461 4545",
49099: "4360 4461 4546",
49100: "4360 4462",
49101: "4360 4462 4520",
49102: "4360 4462 4521",
49103: "4360 4462 4522",
49104: "4360 4462 4523",
49105: "4360 4462 4524",
49106: "4360 4462 4525",
49107: "4360 4462 4526",
49108: "4360 4462 4527",
49109: "4360 4462 4528",
49110: "4360 4462 4529",
49111: "4360 4462 4530",
49112: "4360 4462 4531",
| |
<reponame>arccode/factory
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests keyboard functionality.
Description
-----------
This test checks basic keyboard functionality by asking the operator to press
each key on the keyboard one at a time.
The layout of the keyboard is derived from the vpd 'region' value, and can be
overridden by the ``layout`` argument.
If ``allow_multi_keys`` is True, the operator can press multiple keys at once
to speed up the testing.
If ``sequential_press`` or ``strict_sequential_press`` is True, the operator
has to press each key in order from top-left to bottom-right. Additionally, if
``strict_sequential_press`` is True, the test fails if the operator presses
the wrong key.
A dict ``repeat_times`` can be specified to indicate the number of times each
key has to be pressed before the key is marked as checked.
The test fails after ``timeout_secs`` seconds.
Test Procedure
--------------
1. The test shows an image of the keyboard, with each key labeled with how many
times it needs to be pressed.
2. The operator presses each key the number of times needed, and keys on the UI
are marked accordingly.
3. The test passes when all keys have been pressed the number of times
needed, or fails after ``timeout_secs`` seconds.
Dependency
----------
Depends on 'evdev' module to monitor key presses.
Examples
--------
To test keyboard functionality, add this into test list::
{
"pytest_name": "keyboard"
}
To test keyboard functionality, allow multiple keys to be pressed at once, and
have a timeout of 10 seconds, add this into test list::
{
"pytest_name": "keyboard",
"args": {
"allow_multi_keys": true,
"timeout_secs": 10
}
}
To test keyboard functionality, ask operator to press keys in order, skip
keycodes [4, 5, 6], have keycode 3 be pressed 5 times, and other keys be pressed
2 times to pass, add this into test list::
{
"pytest_name": "keyboard",
"args": {
"sequential_press": true,
"skip_keycodes": [4, 5, 6],
"repeat_times": {
"3": 5,
"default": 2
}
}
}
To test keyboard functionality, ask operator to press keys in order (and fail
the test if wrong key is pressed), and set keyboard layout to ISO, add this
into test list::
{
"pytest_name": "keyboard",
"args": {
"strict_sequential_press": true,
"layout": "ISO"
}
}
"""
import ast
import os
import re
import time
from cros.factory.test.l10n import regions
from cros.factory.test import session
from cros.factory.test import test_case
from cros.factory.test.utils import evdev_utils
from cros.factory.testlog import testlog
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import file_utils
from cros.factory.utils import process_utils
from cros.factory.utils import schema
from cros.factory.external import evdev
_RE_EVTEST_EVENT = re.compile(
r'^Event: time .*?, type .*? \((.*?)\), code (.*?) \(.*?\), value (.*?)$')
_POWER_KEY_CODE = 116
_NUMPAD = 'numpad'
_INTEGER_STRING_SCHEMA = {
'type': 'string',
'pattern': r'^(0[Bb][01]+|0[Oo][0-7]+|0[Xx][0-9A-Fa-f]+|[1-9][0-9]*|0)$'
}
_REPLACEMENT_KEYMAP_SCHEMA = schema.JSONSchemaDict(
'replacement_keymap schema object', {
'type': 'object',
'propertyNames': _INTEGER_STRING_SCHEMA,
'patternProperties': {
'^.*$': _INTEGER_STRING_SCHEMA
}
})
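# Examples of keycode strings accepted by the pattern above (they are later parsed
# with int(value, 0), so prefixed bases are allowed; sample strings are illustrative):
#   "28", "0", "0x1d", "0b101", "0o17"   # valid
#   "010", "1d", ""                      # rejected (no bare leading zeros, no bare hex)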
# Please check:
# ~/trunk/src/third_party/coreboot/src/acpi/acpigen_ps2_keybd.c
# ~/trunk/src/third_party/coreboot/src/include/input-event-codes.h
_ACTION_KEYS_SCANCODE_TO_KEYCODE = {
0xea: 158, # KEY_BACK
0xe9: 159, # KEY_FORWARD
0xe7: 173, # KEY_REFRESH
0x91: 0x174, # KEY_FULL_SCREEN
0x92: 120, # KEY_SCALE
0xa0: 113, # KEY_MUTE
0xae: 114, # KEY_VOLUMEDOWN
0xb0: 115, # KEY_VOLUMEUP
0x9a: 164, # KEY_PLAYPAUSE
0x99: 163, # KEY_NEXTSONG
0x90: 165, # KEY_PREVIOUSSONG
0x93: 99, # KEY_SYSRQ
0x94: 224, # KEY_BRIGHTNESSDOWN
0x95: 225, # KEY_BRIGHTNESSUP
0x97: 229, # KEY_KBDILLUMDOWN
0x98: 230, # KEY_KBDILLUMUP
0x96: 0x279, # KEY_PRIVACY_SCREEN_TOGGLE
}
class KeyboardTest(test_case.TestCase):
"""Tests if all the keys on a keyboard are functioning. The test checks for
keydown and keyup events for each key, following a certain order if required,
and passes if both events of all keys are received.
Among the args are two related arguments:
- sequential_press: a keycode is simply ignored if the key is not pressed
in order
- strict_sequential_press: the test failed immediately if a key is skipped.
"""
ARGS = [
Arg(
'allow_multi_keys', bool, 'Allow multiple keys pressed '
'simultaneously. (Less strict checking '
'with shorter cycle time)', default=False),
Arg(
'multi_keys_delay', (int, float), 'When ``allow_multi_keys`` is '
'``False``, do not fail the test if the delay between the '
'consecutive presses is more than ``multi_keys_delay`` seconds.',
default=0),
Arg(
'layout', str, 'Use specified layout other than derived from VPD. '
'If None, the layout from the VPD is used.', default=None),
Arg('timeout_secs', int, 'Timeout for the test.', default=30),
Arg(
'sequential_press', bool, 'Indicate whether keycodes need to be '
'pressed sequentially or not.', default=False),
Arg(
'strict_sequential_press', bool, 'Indicate whether keycodes need to '
'be pressed strictly sequentially or not.', default=False),
Arg('board', str,
'If presents, in filename, the board name is appended after layout.',
default=''),
Arg(
'device_filter', (int, str),
'If present, the input event ID or a substring of the input device '
'name specifying which keyboard to test.', default=None),
Arg('skip_power_key', bool, 'Skip power button testing', default=False),
Arg('skip_keycodes', list, 'Keycodes to skip', default=[]),
Arg(
'replacement_keymap', dict, 'Dictionary mapping key codes to '
'replacement keycodes. The keycodes must be a string of an integer '
'since json does not support format like 0x10.', default={},
schema=_REPLACEMENT_KEYMAP_SCHEMA),
Arg(
'detect_long_press', bool, 'Detect long press event. Usually for '
'detecting bluetooth keyboard disconnection.', default=False),
Arg(
'repeat_times', dict, 'A dict object {key_code: times} to specify '
'number of presses required for keys specified in key code, e.g. '
'``{"28": 3, "57": 5}``, then ENTER (28) shall be pressed 3 times '
'while SPACE (57) shall be pressed 5 times. If you want all keys to '
'be pressed twice, you can do: ``{"default": 2}``. '
'You can find keycode mappings in /usr/include/linux/input.h',
default=None),
Arg('has_numpad', bool, 'The keyboard has a number pad or not.',
default=False),
Arg('vivaldi_keyboard', bool, 'Get function keys map from sysfs.',
default=True),
]
def setUp(self):
self.assertTrue(not (self.args.allow_multi_keys and
self.args.sequential_press),
'Sequential press requires one key at a time.')
self.assertTrue(not (self.args.allow_multi_keys and
self.args.strict_sequential_press),
'Strict sequential press requires one key at a time.')
self.assertTrue(self.args.multi_keys_delay >= 0,
'multi_keys_delay should be a positive number.')
if self.args.allow_multi_keys and self.args.multi_keys_delay > 0:
session.console.warning('multi_keys_delay is not effective when '
'allow_multi_keys is set to True.')
# Get the keyboard input device.
try:
self.keyboard_device = evdev_utils.FindDevice(
self.args.device_filter, evdev_utils.IsKeyboardDevice)
except evdev_utils.MultipleDevicesFoundError:
session.console.info(
"Please set the test argument 'device_filter' to one of the name.")
raise
# Initialize keyboard layout and bindings
self.layout = self.GetKeyboardLayout()
if self.args.board:
self.layout += '_%s' % self.args.board
self.bindings = self.ReadBindings(self.layout)
self.numpad_keys = None
if self.args.has_numpad:
self.numpad_keys = self.ReadKeyOrder(_NUMPAD)
replacement_keymap = {}
if self.args.vivaldi_keyboard:
replacement_keymap = self.GetVivaldiKeyboardActionKeys()
# Apply any replacement keymap
if self.args.replacement_keymap:
replacement_keymap.update({
int(key, 0): int(value, 0)
for key, value in self.args.replacement_keymap.items()
})
if replacement_keymap:
new_bind = {key: value for key, value in self.bindings.items()
if key not in replacement_keymap}
for old_key, new_key in replacement_keymap.items():
if old_key in self.bindings:
new_bind[new_key] = self.bindings[old_key]
self.bindings = new_bind
if self.args.has_numpad:
self.numpad_keys = [
replacement_keymap.get(x, x) for x in self.numpad_keys
]
self.all_keys = set(self.bindings.keys())
if self.args.has_numpad:
self.all_keys.update(self.numpad_keys)
self.frontend_proxy = self.ui.InitJSTestObject(
'KeyboardTest', self.layout, self.bindings, self.numpad_keys)
keycodes_to_skip = set(self.args.skip_keycodes)
if self.args.skip_power_key:
keycodes_to_skip.add(_POWER_KEY_CODE)
keycodes_to_skip &= self.all_keys
if self.args.sequential_press or self.args.strict_sequential_press:
self.key_order_list = [
key for key in self.ReadKeyOrder(self.layout) if key in self.all_keys
]
if self.args.has_numpad:
self.key_order_list += self.numpad_keys
else:
self.ui.HideElement('instruction-sequential-numpad')
else:
self.key_order_list = None
self.ui.HideElement('instruction-sequential')
self.ui.HideElement('instruction-sequential-numpad')
if self.args.allow_multi_keys:
self.ui.HideElement('instruction-single-key')
self.down_keys = set()
self.ignored_down_keys = set()
self.last_press_time = 0
self.number_to_press = {}
repeat_times = self.args.repeat_times or {}
default_number_to_press = repeat_times.get('default', 1)
for key in self.all_keys:
if key in keycodes_to_skip:
self.number_to_press[key] = 0
self.MarkKeyState(key, 'skipped')
else:
self.number_to_press[key] = repeat_times.get(
str(key), default_number_to_press)
self.MarkKeyState(key, 'untested')
self.dispatcher = evdev_utils.InputDeviceDispatcher(
self.keyboard_device, self.event_loop.CatchException(self.HandleEvent))
testlog.UpdateParam('malfunction_key',
description='The keycode of malfunction keys')
def tearDown(self):
"""Terminates the running process or we'll have trouble stopping the test.
"""
self.dispatcher.close()
self.keyboard_device.ungrab()
def GetVivaldiKeyboardActionKeys(self):
match = re.search(r'\d+$', self.keyboard_device.path)
if not match:
raise RuntimeError('Failed to get keyboard device ID')
event_id = match.group(0)
file_content = file_utils.ReadFile(
f'/sys/class/input/event{event_id}/device/device/function_row_physmap')
scancodes = [int(s, 16) for s in file_content.strip().split()]
replacement_keymap = {}
if len(scancodes) > 10:
session.console.warning(
f'There are {len(scancodes)} function keys, normally it should be 10.'
' Please check if this pytest actually tests all function keys.')
for (key, scancode) in enumerate(scancodes, 59):
try:
replacement_keymap[key] = _ACTION_KEYS_SCANCODE_TO_KEYCODE[scancode]
except KeyError:
session.console.exception(f'Cannot find keycode of {scancode}')
raise
session.console.info(f'Vivaldi Keyboard Keys: {replacement_keymap}')
return replacement_keymap
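# Illustrative example (made-up sysfs content): if function_row_physmap reads
# "EA E9 E7", the scancodes [0xea, 0xe9, 0xe7] are enumerated starting from
# keycode 59 (F1's position), yielding {59: 158, 60: 159, 61: 173}, i.e. F1/F2/F3
# are remapped to KEY_BACK / KEY_FORWARD / KEY_REFRESH via the table above.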
def GetKeyboardLayout(self):
"""Uses the given keyboard layout or auto-detect from VPD."""
if self.args.layout:
return self.args.layout
# Use the primary keyboard_layout for testing.
region = process_utils.CheckOutput(['vpd', '-g', 'region']).strip()
return regions.REGIONS[region].keyboard_mechanical_layout
def ReadBindings(self, layout):
"""Reads in key bindings and their associates figure regions."""
| |
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function]
cls.add_method('GetIpv6HopLimit',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function]
cls.add_method('GetIpv6Tclass',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
cls.add_method('GetRxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
cls.add_method('GetSockName',
'int',
[param('ns3::Address &', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
cls.add_method('GetSocketType',
'ns3::Socket::SocketType',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
cls.add_method('GetTxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function]
cls.add_method('IsIpRecvTos',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function]
cls.add_method('IsIpRecvTtl',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function]
cls.add_method('IsIpv6RecvHopLimit',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function]
cls.add_method('IsIpv6RecvTclass',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function]
cls.add_method('IsRecvPktInfo',
'bool',
[],
is_const=True)
## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
cls.add_method('Listen',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[])
## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Recv',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
cls.add_method('SendTo',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
cls.add_method('SendTo',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
cls.add_method('SetAcceptCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
cls.add_method('SetAllowBroadcast',
'bool',
[param('bool', 'allowBroadcast')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
cls.add_method('SetCloseCallbacks',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
cls.add_method('SetConnectCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
cls.add_method('SetDataSentCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function]
cls.add_method('SetIpRecvTos',
'void',
[param('bool', 'ipv4RecvTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function]
cls.add_method('SetIpRecvTtl',
'void',
[param('bool', 'ipv4RecvTtl')])
## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function]
cls.add_method('SetIpTos',
'void',
[param('uint8_t', 'ipTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function]
cls.add_method('SetIpTtl',
'void',
[param('uint8_t', 'ipTtl')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function]
cls.add_method('SetIpv6HopLimit',
'void',
[param('uint8_t', 'ipHopLimit')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function]
cls.add_method('SetIpv6RecvHopLimit',
'void',
[param('bool', 'ipv6RecvHopLimit')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function]
cls.add_method('SetIpv6RecvTclass',
'void',
[param('bool', 'ipv6RecvTclass')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function]
cls.add_method('SetIpv6Tclass',
'void',
[param('int', 'ipTclass')])
## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
cls.add_method('SetRecvCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function]
cls.add_method('SetRecvPktInfo',
'void',
[param('bool', 'flag')])
## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function]
cls.add_method('ShutdownRecv',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function]
cls.add_method('ShutdownSend',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## socket.h (module 'network'): bool ns3::Socket::IsManualIpTos() const [member function]
cls.add_method('IsManualIpTos',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function]
cls.add_method('IsManualIpTtl',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function]
cls.add_method('IsManualIpv6HopLimit',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function]
cls.add_method('IsManualIpv6Tclass',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function]
cls.add_method('NotifyConnectionFailed',
'void',
[],
visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function]
cls.add_method('NotifyConnectionRequest',
'bool',
[param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function]
cls.add_method('NotifyConnectionSucceeded',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function]
cls.add_method('NotifyDataRecv',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function]
cls.add_method('NotifyDataSent',
'void',
[param('uint32_t', 'size')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function]
cls.add_method('NotifyErrorClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function]
cls.add_method('NotifyNewConnectionCreated',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function]
cls.add_method('NotifyNormalClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function]
cls.add_method('NotifySend',
'void',
[param('uint32_t', 'spaceAvailable')],
visibility='protected')
return
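## Illustrative note (not part of the generated bindings): every registration above
## follows the same pybindgen pattern -- cls.add_method(name, return_type,
## [param(...), ...], flags) -- mirroring the C++ declaration quoted in the
## preceding '## socket.h' comment, e.g.
##   cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True)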
def register_Ns3SocketAddressTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag(ns3::SocketAddressTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag() [constructor]
cls.add_constructor([])
import unittest
import numpy as np
from arrus.devices.device import Device
from arrus.tests.tools import mock_import
# Module mocks.
class Us4OEMMock:
pass
mock_import(
"arrus.devices.ius4oem",
IUs4OEM=Us4OEMMock
)
mock_import(
"arrus.devices.idbarLite",
DBARLite=None
)
mock_import(
"arrus.devices.ihv256",
HV256=None
)
# Project imports.
from arrus.devices.probe import (
Probe,
ProbeHardwareSubaperture,
Subaperture
)
# Class mocks
class Us4OEMCardMock(Device):
def __init__(self, index, n_rx_channels, n_tx_channels, mock_data=None):
super().__init__("Us4OEMCardMock", index)
self.n_tx_channels = n_tx_channels
self.n_rx_channels = n_rx_channels
self.tx_aperture = None
self.tx_delays = [0.0]*self.n_tx_channels
self.tx_frequency = None
self.tx_periods = None
self.rx_apertures = []
self.rx_total_bytes = 0
self.mock_data = mock_data
def get_n_rx_channels(self):
return self.n_rx_channels
def get_n_tx_channels(self):
return self.n_tx_channels
def start_if_necessary(self):
pass
def set_tx_aperture(self, origin, size):
self.tx_aperture = Subaperture(origin, size)
def set_tx_delays(self, delays):
for i, delay in enumerate(delays):
self.tx_delays[i] = delay
def set_tx_delay(self, channel, delay):
self.tx_delays[channel] = delay
def set_tx_frequency(self, f):
self.tx_frequency = f
def set_tx_periods(self, n):
self.tx_periods = n
def set_rx_aperture(self, origin, size):
if self.mock_data is not None:
self.rx_apertures.append(Subaperture(origin, size))
def schedule_receive(self, address, length):
if self.mock_data is not None:
self.rx_total_bytes += length
def sw_trigger(self):
pass
def wait_until_sgdma_finished(self):
pass
def transfer_rx_buffer_to_host(self, dst_array, src_addr):
if self.mock_data is not None:
last_subaperture = self.rx_apertures[-1]
origin, size = last_subaperture.origin, last_subaperture.size
dst_array[:, :] = self.mock_data[:, origin:(origin+size)]
def set_rx_time(self, rx_time):
self.rx_time = rx_time
class ProbeRxTest(unittest.TestCase):
def test_probe_sets_rx_for_two_cards(self):
pass
# Set.
# TODO(pjarosik) consider removing or fixing this test
# hw_subapertures = [
# ProbeHardwareSubaperture(
# card=Us4OEMCardMock(
# 0, n_rx_channels=32, n_tx_channels=128,
# mock_data=np.tile(np.array(range(0, 128)), 4096).reshape((4096, 128))
# ),
# origin=0,
# size=128
# ),
# ProbeHardwareSubaperture(
# card=Us4OEMCardMock(
# 1, n_rx_channels=32, n_tx_channels=128,
# mock_data=np.tile(np.array(range(128, 192)), 4096).reshape((4096, 64))
# ),
# origin=0,
# size=64
# )
# ]
#
# # Run.
# probe = self._create_probe(hw_subapertures, 0)
# tx_aperture = Subaperture(0, 192)
# tx_delays = list(range(0, 192))
# carrier_frequency = 14e6
# n_periods = 1
# rf = probe.transmit_and_record(
# tx_aperture=tx_aperture,
# tx_delays=tx_delays,
# carrier_frequency=carrier_frequency,
# n_tx_periods=n_periods,
# n_samples=4096
# )
# # Verify.
# card0 = hw_subapertures[0].card
# card1 = hw_subapertures[1].card
# self.assertListEqual(
# [
# Subaperture(0, 32),
# Subaperture(32, 32),
# Subaperture(64, 32),
# Subaperture(96, 32)
# ],
# card0.rx_apertures)
# self.assertEqual(128*4096*probe.dtype.itemsize, card0.rx_total_bytes)
# self.assertListEqual(
# [
# Subaperture(0, 32),
# Subaperture(32, 32),
# ],
# card1.rx_apertures)
# self.assertEqual(64*4096*probe.dtype.itemsize, card1.rx_total_bytes)
# # First row of RF matrix contains expected pattern.
# self.assertTrue((rf[0, :] == list(range(0, 192))).all())
# # All rows are the same.
# self.assertTrue((rf[0, :] == rf).all())
def _create_probe(self, apertures, master_card_idx):
return Probe(
index=0,
model_name="test_probe",
hw_subapertures=apertures,
master_card=apertures[master_card_idx].card,
pitch=0.245e-3
)
class ProbeTxTest(unittest.TestCase):
def test_probe_sets_tx_for_single_card(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=32 # Determines the size of the probe's aperture.
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 32)
tx_delays = list(range(0, 32))
carrier_frequency = 5e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(tx_aperture, hw_subapertures[0].card.tx_aperture)
self.assertEqual(carrier_frequency, hw_subapertures[0].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[0].card.tx_periods)
self._assert_card_delays(
[
tx_delays + [0.0]*96
],
hw_subapertures
)
def test_probe_sets_tx_for_single_card_hw_offset(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=34,
size=32
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 16)
tx_delays = list(range(0, 16))
carrier_frequency = 5e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(34, 16),
hw_subapertures[0].card.tx_aperture)
self.assertEqual(carrier_frequency, hw_subapertures[0].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[0].card.tx_periods)
self._assert_card_delays(
[
34*[0.0] + tx_delays + [0.0]*(128-(34+len(tx_delays))),
],
hw_subapertures
)
def test_probe_sets_tx_for_single_card_offset_origin(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(14, 32)
tx_delays = list(range(0, 32))
carrier_frequency = 10e6
n_periods = 2
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(tx_aperture, hw_subapertures[0].card.tx_aperture)
self.assertEqual(carrier_frequency, hw_subapertures[0].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[0].card.tx_periods)
self._assert_card_delays(
[
14*[0.0] + tx_delays + [0.0]*(128-(14+len(tx_delays))),
],
hw_subapertures
)
def test_probe_sets_tx_for_two_cards(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 192)
tx_delays = list(range(0, 192))
carrier_frequency = 14e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(0, 128),
hw_subapertures[0].card.tx_aperture)
self.assertEqual(Subaperture(0, 64),
hw_subapertures[1].card.tx_aperture)
self._assert_card_delays(
[
list(range(0, 128)),
list(range(128, 192)) + [0.0]*64
],
hw_subapertures
)
for hws in hw_subapertures:
self.assertEqual(carrier_frequency, hws.card.tx_frequency)
self.assertEqual(n_periods, hws.card.tx_periods)
def test_probe_sets_tx_for_two_cards_complete_apertures(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 256)
tx_delays = list(range(0, 256))
carrier_frequency = 14e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(0, 128),
hw_subapertures[0].card.tx_aperture)
self.assertEqual(Subaperture(0, 128),
hw_subapertures[1].card.tx_aperture)
self._assert_card_delays(
[
list(range(0, 128)),
list(range(128, 256))
],
hw_subapertures
)
for hws in hw_subapertures:
self.assertEqual(carrier_frequency, hws.card.tx_frequency)
self.assertEqual(n_periods, hws.card.tx_periods)
def test_probe_sets_tx_for_two_cards_only_first_card_aperture(self):
# Two cards, tx aperture only on the first card
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(16, 64)
tx_delays = list(range(0, 64))
carrier_frequency = 8e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(16, 64),
hw_subapertures[0].card.tx_aperture)
self.assertIsNone(hw_subapertures[1].card.tx_aperture)
self._assert_card_delays(
[
[0.0]*16 + list(range(0, 64)) + [0.0]*(128-(16+len(tx_delays))),
[0.0]*128
],
hw_subapertures
)
self.assertEqual(carrier_frequency, hw_subapertures[0].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[0].card.tx_periods)
def test_probe_sets_tx_for_two_cards_only_second_card_aperture(self):
# Two cards, tx aperture only on the second card
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(130, 32)
tx_delays = list(range(0, 32))
carrier_frequency = 8e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(2, 32),
hw_subapertures[1].card.tx_aperture)
self.assertIsNone(hw_subapertures[0].card.tx_aperture)
self._assert_card_delays(
[
[0.0]*128,
[0.0]*2 + list(range(0, 32)) + [0.0]*(128-(2+len(tx_delays))),
],
hw_subapertures
)
self.assertEqual(carrier_frequency, hw_subapertures[1].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[1].card.tx_periods)
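    # Illustrative note: with card 0 covering probe elements 0..127 and card 1
    # covering elements 128..191, the probe-level aperture Subaperture(130, 32)
    # above is expected to map entirely onto card 1 as Subaperture(2, 32), with
    # the 32 delays written to card-1 channels 2..33 and every other channel
    # left at 0.0.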
    def test_probe_sets_tx_aperture_two_cards_second_hw_offset(self):
# Two cards, second one's aperture starts at origin > 0
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=14,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 192)
tx_delays = list(range(0, 192))
carrier_frequency = 8e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(0, 128),
hw_subapertures[0].card.tx_aperture)
self.assertEqual(Subaperture(14, 64),
hw_subapertures[1].card.tx_aperture)
self._assert_card_delays(
[
list(range(0, 128)),
[0.0]*14 + list(range(128, 192)) + [0.0]*50
],
hw_subapertures
)
for hws in hw_subapertures:
self.assertEqual(carrier_frequency, hws.card.tx_frequency)
self.assertEqual(n_periods, hws.card.tx_periods)
def test_single_element_aperture_card1(self):
# Two cards, single element aperture near right border of the first card aperture
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(127, 1)
tx_delays = [1]
carrier_frequency = 8e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertEqual(Subaperture(127, 1),
hw_subapertures[0].card.tx_aperture)
self.assertIsNone(hw_subapertures[1].card.tx_aperture)
self._assert_card_delays(
[
[0.0]*127 + [1],
[0.0]*128
],
hw_subapertures
)
self.assertEqual(carrier_frequency, hw_subapertures[0].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[0].card.tx_periods)
def test_single_element_aperture_card2(self):
        # Two cards, single element aperture near left border of the second card aperture
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(128, 1)
tx_delays = [1]
carrier_frequency = 8e6
n_periods = 1
probe.transmit_and_record(
tx_aperture=tx_aperture,
tx_delays=tx_delays,
carrier_frequency=carrier_frequency,
n_tx_periods=n_periods
)
# Verify.
self.assertIsNone(hw_subapertures[0].card.tx_aperture)
self.assertEqual(Subaperture(0, 1),
hw_subapertures[1].card.tx_aperture)
self._assert_card_delays(
[
[0.0]*128,
[1] + [0.0]*127
],
hw_subapertures
)
self.assertEqual(carrier_frequency, hw_subapertures[1].card.tx_frequency)
self.assertEqual(n_periods, hw_subapertures[1].card.tx_periods)
def _create_probe(self, apertures, master_card_idx):
return Probe(
index=0,
model_name="test_probe",
hw_subapertures=apertures,
master_card=apertures[master_card_idx].card,
pitch=0.245e-3
)
def _assert_card_delays(
self,
expected_delays,
hw_subapertures
):
for hws, expected_delay in zip(hw_subapertures, expected_delays):
card = hws.card
self.assertListEqual(expected_delay, card.tx_delays)
class ProbeSetTxApertureTest(unittest.TestCase):
#TODO(pjarosik) remove ProbeTxTest
def test_probe_sets_tx_for_single_card(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=32 # Determines the size of the probe's aperture.
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 32)
probe.set_tx_aperture(tx_aperture=tx_aperture)
# Verify.
self.assertEqual(tx_aperture, hw_subapertures[0].card.tx_aperture)
def test_probe_sets_tx_for_single_card_hw_offset(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=34,
size=32
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 16)
probe.set_tx_aperture(tx_aperture=tx_aperture)
# Verify.
self.assertEqual(Subaperture(34, 16),
hw_subapertures[0].card.tx_aperture)
def test_probe_sets_tx_for_single_card_offset_origin(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(14, 32)
probe.set_tx_aperture(tx_aperture=tx_aperture)
# Verify.
self.assertEqual(tx_aperture, hw_subapertures[0].card.tx_aperture)
def test_probe_sets_tx_for_two_cards(self):
# Set.
hw_subapertures = [
ProbeHardwareSubaperture(
card=Us4OEMCardMock(0, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=128
),
ProbeHardwareSubaperture(
card=Us4OEMCardMock(1, n_rx_channels=32, n_tx_channels=128),
origin=0,
size=64
)
]
# Run.
probe = self._create_probe(hw_subapertures, 0)
tx_aperture = Subaperture(0, 192)
probe.set_tx_aperture(tx_aperture=tx_aperture)
# Verify.
self.assertEqual(Subaperture(0, 128),
hw_subapertures[0].card.tx_aperture)
#!/usr/bin/env python
"""A widget to display changing values in real time as a strip chart
Known issues:
Matplotlib's defaults present a number of challenges for making a nice strip chart display.
Here are manual workarounds for some common problems:
- Memory Leak:
    Matplotlib 1.0.0 has a memory leak in canvas.draw(), at least when using TkAgg:
    <https://sourceforge.net/tracker/?func=detail&atid=560720&aid=3124990&group_id=80706>
    Unfortunately canvas.draw is the only way to update the display after altering the x/time axis.
Thus every StripChartWdg will leak memory until the matplotlib bug is fixed;
the best you can do is reduce the leak rate by increasing updateInterval.
- Jumping Ticks:
By default the major time ticks and grid jump to new values as time advances. I haven't found an
automatic way to keep them steady, but you can do it manually by following these examples:
# show a major tick every 10 seconds on even 10 seconds
stripChart.xaxis.set_major_locator(matplotlib.dates.SecondLocator(bysecond=range(0, 60, 10)))
# show a major tick every 5 seconds on even 5 minutes
stripChart.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=range(0, 60, 5)))
- Reducing The Spacing Between Subplots:
Adjacent subplots are rather widely spaced. You can manually shrink the spacing but then
the major Y labels will overlap. Here is a technique that includes "pruning" the top major tick label
from each subplot and then shrinking the subplot horizontal spacing:
for subplot in stripChartWdg.subplotArr:
subplot.yaxis.get_major_locator().set_params(prune = "upper")
stripChartWdg.figure.subplots_adjust(hspace=0.1)
- Truncated X Axis Labels:
The x label is truncated if the window is short, due to poor auto-layout on matplotlib's part.
Also the top and sides may have too large a margin. <NAME> provided code that should solve the
issue automatically, but I have not yet incorporated it. You can try the following manual tweak:
(values are fraction of total window height or width, so they must be in the range 0-1):
stripChartWdg.figure.subplots_adjust(bottom=0.15) # top=..., left=..., right=...
Unfortunately, values that look good at one window size may not be suitable at another.
- Undesirable colors and font sizes:
If you are unhappy with the default choices of font size and background color
you can edit the .matplotlibrc file or make settings programmatically.
Some useful programmatic settings:
# by default the background color of the outside of the plot is gray; set using figure.facecolor:
matplotlib.rc("figure", facecolor="white")
# by default legends have large text; set using legend.fontsize:
matplotlib.rc("legend", fontsize="medium")
Requirements:
- Requires matplotlib built with TkAgg support
Acknowledgements:
I am grateful to <NAME>, <NAME> and others on matplotlib-users
for advice on tying the x axes together and improving the layout.
History:
2010-09-29 ROwen
2010-11-30 ROwen Fixed a memory leak (Line._purgeOldData wasn't working correctly).
2010-12-10 ROwen Document a memory leak caused by matplotlib's canvas.draw.
2010-12-23 ROwen Backward-incompatible changes:
- addPoint is now called on the object returned by addLine, not StripChartWdg.
        This eliminates the need to give lines unique names.
- addPoint is silently ignored if y is None
- addLine and addConstantLine have changed:
- There is no "name" argument; use label if you want a name that shows up in legends.
- The label does not have to be unique.
- They return an object.
Added removeLine method.
2010-12-29 ROwen Document useful arguments for addLine.
2012-05-31 ROwen Add a clear method to StripChartWdg and _Line.
2012-06-04 ROwen Reduce CPU usage by doing less work if not visible (not mapped).
2012-07-09 ROwen Modified to use opscore.RO.TkUtil.Timer.
2012-09-18 ROwen Explicitly import matplotlib.dates to avoid a problem with matplotlib 1.2.0rc1
2015-09-24 ROwen Replace "== None" with "is None" to modernize the code.
2015-11-03 ROwen Replace "!= None" with "is not None" to modernize the code.
"""
__all__ = ["StripChartWdg"]
import bisect
import datetime
import time
import numpy
from six.moves import tkinter
import matplotlib
import matplotlib.dates
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from opscore.RO.TkUtil import Timer
class StripChartWdg(tkinter.Frame):
"""A widget to changing values in real time as a strip chart
Usage Hints:
- For each variable quantity to display:
- Call addLine once to specify the quantity
- Call addPoint for each new data point you wish to display
- For each constant line (e.g. limit) to display call addConstantLine
- To make sure a plot includes one or two y values (e.g. 0 or a range of values) call showY
- To manually scale a Y axis call setYLimits (by default all y axes are autoscaled).
- All supplied times are POSIX timestamps (e.g. as supplied by time.time()).
You may choose the kind of time displayed on the time axis (e.g. UTC or local time) using cnvTimeFunc
and the format of that time using dateFormat.
Known Issues:
matplotlib's defaults present a number of challenges for making a nice strip chart display.
Some issues and manual solutions are discussed in the main file's document string.
Potentially Useful Attributes:
- canvas: the matplotlib FigureCanvas
- figure: the matplotlib Figure
- subplotArr: list of subplots, from top to bottom; each is a matplotlib Subplot object,
which is basically an Axes object but specialized to live in a rectangular grid
- xaxis: the x axis shared by all subplots
"""
def __init__(self,
master,
timeRange = 3600,
numSubplots = 1,
width = 8,
height = 2,
showGrid = True,
dateFormat = "%H:%M:%S",
updateInterval = None,
cnvTimeFunc = None,
):
"""Construct a StripChartWdg with the specified time range
Inputs:
- master: Tk parent widget
- timeRange: range of time displayed (seconds)
- width: width of graph in inches
- height: height of graph in inches
- numSubplots: the number of subplots
- showGrid: if True a grid is shown
- dateFormat: format for major axis labels, using time.strftime format
        - updateInterval: how often the time axis is updated (seconds); if None a value is calculated
- cnvTimeFunc: a function that takes a POSIX timestamp (e.g. time.time()) and returns matplotlib days;
typically an instance of TimeConverter; defaults to TimeConverter(useUTC=False)
"""
tkinter.Frame.__init__(self, master)
self._timeRange = timeRange
self._isVisible = self.winfo_ismapped()
self._isFirst = True
if updateInterval is None:
updateInterval = max(0.1, min(5.0, timeRange / 2000.0))
self.updateInterval = float(updateInterval)
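        # Illustrative example: with the default timeRange of 3600 s the computed
        # interval is 3600/2000 = 1.8 s, clamped to the range [0.1, 5.0].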
# print "updateInterval=", self.updateInterval
if cnvTimeFunc is None:
cnvTimeFunc = TimeConverter(useUTC=False)
self._cnvTimeFunc = cnvTimeFunc
# how many time axis updates occur before purging old data
self._maxPurgeCounter = max(1, int(0.5 + (5.0 / self.updateInterval)))
self._purgeCounter = 0
self.figure = matplotlib.figure.Figure(figsize=(width, height), frameon=True)
self.canvas = FigureCanvasTkAgg(self.figure, self)
self.canvas.get_tk_widget().grid(row=0, column=0, sticky="news")
self.canvas.mpl_connect('draw_event', self._handleDrawEvent)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
bottomSubplot = self.figure.add_subplot(numSubplots, 1, numSubplots)
self.subplotArr = [self.figure.add_subplot(numSubplots, 1, n+1, sharex=bottomSubplot) \
for n in range(numSubplots-1)] + [bottomSubplot]
if showGrid:
for subplot in self.subplotArr:
subplot.grid(True)
self.xaxis = bottomSubplot.xaxis
bottomSubplot.xaxis_date()
self.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(dateFormat))
# dictionary of constant line name: (matplotlib Line2D, matplotlib Subplot)
self._constLineDict = dict()
for subplot in self.subplotArr:
subplot._scwLines = [] # a list of contained _Line objects;
# different than the standard lines property in that:
# - lines contains Line2D objects
# - lines contains constant lines as well as data lines
subplot._scwBackground = None # background for animation
subplot.label_outer() # disable axis labels on all but the bottom subplot
subplot.set_ylim(auto=True) # set auto scaling for the y axis
self.bind("<Map>", self._handleMap)
self.bind("<Unmap>", self._handleUnmap)
self._timeAxisTimer = Timer()
self._updateTimeAxis()
def addConstantLine(self, y, subplotInd=0, **kargs):
"""Add a new constant to plot
Inputs:
- y: value of constant line
- subplotInd: index of subplot
- All other keyword arguments are sent to the matplotlib Line2D constructor
to control the appearance of the data. See addLine for more information.
"""
subplot = self.subplotArr[subplotInd]
line2d = subplot.axhline(y, **kargs)
yMin, yMax = subplot.get_ylim()
if subplot.get_autoscaley_on() and numpy.isfinite(y) and not (yMin <= y <= yMax):
subplot.relim()
subplot.autoscale_view(scalex=False, scaley=True)
return line2d
def addLine(self, subplotInd=0, **kargs):
"""Add a new quantity to plot
Inputs:
- subplotInd: index of subplot
- All other keyword arguments are sent to the matplotlib Line2D constructor
to control the appearance of the data. Useful arguments include:
- label: name of line (displayed in a Legend)
- color: color of line
            - linestyle: style of line (defaults to a solid line); "" for no line, "--" for dashed, etc.
- marker: marker shape, e.g. "+"
Please do not attempt to control other sorts of line properties, such as its data.
Arguments to avoid include: animated, data, xdata, ydata, | |
#!/usr/bin/env python3
import subprocess
import sys
import re
'''
upsDiff version 1.0 written by <NAME> on 2/15/19
This script compares two .ups files for differences based on XML paths. Input files are assumed to have valid XML syntax.
The XML paths of unique leaf nodes not found in the opposing ups file are output. Nodes marked with '*' have siblings of the same name.
Multiple asterisks indicate multiple similarly named siblings. Please manually compare similar siblings of the same file to determine
the best matching node in the opposing input file.
Note: This script may fail if elements or attributes contain any of these characters: <, >, /, *
Usage: upsDiff.py file1.ups file2.ups <optional args>
Optional arguments:")
--no-output |Do not display output. Does not apply to error messages. Overrules all other options.
--ellipsis <int> |Truncate path differences after <int> number of different elements. Default is 1. Setting 0 for <int> will turn truncation off.
'''
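# Example invocation (illustrative):
#   ./upsDiff.py old.ups new.ups --ellipsis 2
# exits with 0 when both files contain the same XML paths, 1 when differences
# are found, and 2/3 on argument or file errors (see the codes below).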
# ____________________________________________________________
# Codes returned by this script
returnCodeNoDifference = 0
returnCodeDifferencesFound = 1
returnCodeArgumentError = 2
returnCodeFileNotFound = 3
# ______________________________________________________________________
# Classes
# ____________________________________________________________
# Tree Class used to store XML hierarchy
class Tree(object):
# ____________________________________________________________
# Tree constructor
def __init__(self, _attribute, _element):
# Tree info
self.numChildren = 0
self.children = []
self.parent = None
# XML info
self.attribute = _attribute
self.element = _element
# end init()
# ____________________________________________________________
# Add a child tree to a node of this.tree
def addChildTree(self, newChildTree):
# New child's parent becomes this tree
newChildTree.parent = self
# add child to next available index in the parent tree
self.children.append(newChildTree)
self.children = sorted(self.children, key=lambda tree: tree.element)
# increment num children
self.numChildren = self.numChildren + 1
# end addChildTree()
# ____________________________________________________________
# Check if two tree nodes are equal by comparing elements and attribute
def equalsNode(self, otherNode):
if self.element == otherNode.element and self.attribute == otherNode.attribute:
return True
return False
# end equalsNode()
# ____________________________________________________________
# Generates a string representation of a node. Used in Tree.getPath()
def nodeToString(self):
returnString = ""
# Check if this node has siblings of the same name
if self.parent is not None:
for sibling in self.parent.children:
if sibling is not self and self.equalsNode(sibling):
returnString = returnString + "*"
if self.attribute is None:
returnString = returnString + self.element
else:
returnString = returnString + self.element + "/" + self.attribute
return returnString
# end nodeToString()
# ____________________________________________________________
# Generate the XML path of a node
def getPath(self):
path = self.nodeToString()
nextParent = self.parent
while nextParent.parent is not None:
path = nextParent.nodeToString() + "/" + path
nextParent = nextParent.parent
return path
# end getPath()
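    # Illustrative example (hypothetical XML): for the document
    #   <Grid><Level><Box/><Box/></Level></Grid>
    # the second <Box/> leaf produces the path "<Grid>/<Level>/*<Box/>", the
    # leading "*" flagging that the node has a sibling of the same name.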
# End Tree Class
# ______________________________________________________________________
# Functions
# ____________________________________________________________
# Read in file, save XML hierarchy as a tree
def generateXMLTree(path):
# Create output tree
xmlTree = Tree(None, path)
currentNode = xmlTree
# Open and read file
with open(path, 'r') as file:
fileData = file.read()
# Split the data to create a collection of <token>
tokens = re.split('(<[^>]*>)', fileData)
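    # Illustrative example: re.split with a capturing group keeps the delimiters,
    # so '<a><b/></a>' becomes ['', '<a>', '', '<b/>', '', '</a>', ''].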
    # First token (tokens[0]) is always empty, therefore the true
# start index is always > 0.
# The next token is usually the XML encoding.
# Something like <?xml version="1.0" encoding="iso-8859-1"?>
# If so, skip that token too
if tokens[1][0:2] == "<?" and tokens[1][-2:] == "?>":
startIdx = 2
else:
startIdx = 1
# Loop through each token to process
currentlyInComment = False
for idx in range(startIdx, len(tokens)):
# Set current token while removing leading/trailing whitespace
token = tokens[idx].replace('\n', '').strip()
# Skip empty tokens
if token == "":
continue
        # If token starts a comment, turn on currentlyInComment bool
if (not currentlyInComment) and token[0:4] == "<!--":
currentlyInComment = True
# Keep continuing, until the end of comment is found, then turn off currentlyInComment bool
if currentlyInComment:
if token[-3:] == "-->":
currentlyInComment = False
                continue # Skip current token because it is the end of a comment ("-->")
else:
                continue # Skip current token because it is part of a comment
# Now that we are sure the current token isn't garbage, determine what kind of token.
# 1. Normal close: </element>
if len(token) > 3 and token[1] == "/":
currentNode = currentNode.parent
# 2. Shorthand open and close: <token attribute />
elif len(token) > 3 and token[-2] == "/":
node = Tree(None, token)
currentNode.addChildTree(node)
# 3. Normal open: <element> or <element attribute>
elif len(token) > 2 and token[0] == "<" and token[-1] == ">":
attribute = None
element = token
node = Tree(attribute, element)
currentNode.addChildTree(node)
currentNode = node
# 4. Token is just <element> attribute </element>
else:
currentNode.attribute = token
# } end token type if branches
# } end for loop through tokens
return xmlTree
# end generateXMLPaths()
# ____________________________________________________________
# Generates and returns a list of leaf-nodes xml paths from a tree. Driver for recursive implementation
def generateTreePaths(tree):
paths = []
generateTreePathsRecursive(tree, paths)
return paths
# end generateTreePaths()
# ____________________________________________________________
# Recursive method to traverse a tree down to the leaf-nodes. Compiles a list of leaf-node XML paths
def generateTreePathsRecursive(tree, paths):
for child in tree.children:
if len(child.children) == 0:
paths.append(child.getPath())
generateTreePathsRecursive(child, paths)
# end generateTreePathsRecursive()
# ____________________________________________________________
# Compares two lists of paths for differences. No output is displayed
def processDifferencesNoOutput(list1, list2):
# Call helper method to generate differences
[uniqueToFirst, uniqueToSecond] = setDifferenceHelper(list1, list2)
# If length of inputs is 0, no differences
length1 = len(uniqueToFirst)
length2 = len(uniqueToSecond)
if length1 == 0 and length2 == 0:
return returnCodeNoDifference
else:
return returnCodeDifferencesFound
# end processDiffs()
# ____________________________________________________________
# Compares two lists of paths for differences.
def processDifferences(list1, list2, tree1Name, tree2Name, tree1, tree2):
# Call helper method to generate differences
[uniqueToFirst, uniqueToSecond] = setDifferenceHelper(list1, list2)
# If length of inputs is 0, no differences
length1 = len(uniqueToFirst)
length2 = len(uniqueToSecond)
if length1 == 0 and length2 == 0:
print("No differences")
return returnCodeNoDifference
else:
print("Differences detected:")
# Otherwise, print differences then return
if length1 > 0:
printPaths(uniqueToFirst, tree2, tree1Name, "<")
print("\n---")
if length2 > 0:
printPaths(uniqueToSecond, tree1, tree2Name, ">")
print()
return returnCodeDifferencesFound
# end processDiffs()
# ____________________________________________________________
# Converts two lists to sets, generates the difference of A-B and B-A
# Helper method to eliminate redundant operations
def setDifferenceHelper(list1, list2):
# Convert lists to sets for easy comparison
A = set(list1)
B = set(list2)
uniqueToFirst = A - B
uniqueToSecond = B - A
return [uniqueToFirst, uniqueToSecond]
# end setHelper()
# ____________________________________________________________
# Prints every path from a list of path, in sorted order.
# Each path is compared to the other tree.
# If the non-matching part has a long path, print the first non-matching elements + "/..."
def printPaths(nodes, otherTree, thisTreeFileName, angleBracket):
setOfPaths = set()
lineNumbers = []
for node in sorted(nodes):
diffIdx = findEquivalentPartialPath(node, otherTree)
splitByPathParts = node.split("/")
printPath = ""
# Add matching part of path
for partOfPath in splitByPathParts[0:diffIdx]:
printPath = printPath + partOfPath + "/"
addLastBit = True
# Add non-matching part of path. Previously added this part with a different color.
# This block of code could most likely be consolidated with the previous block. All we really need now is the diffIdx
for i in range(0, len(splitByPathParts[diffIdx:-1])):
partOfPath = splitByPathParts[diffIdx:-1][i]
# if the non-matching part has a long path
if ellipsisOn:
if len(splitByPathParts) - diffIdx > 1 and i > ellipsisLevel:
printPath = printPath + "..."
addLastBit = False
break
# else
printPath = printPath + partOfPath + "/"
# Add last element separate to exclude the "/"
if addLastBit:
printPath = printPath + splitByPathParts[-1]
        if printPath not in setOfPaths:
setOfPaths.add(printPath)
lineNumb = getLineNumber(printPath, thisTreeFileName, diffIdx)
lineNumbers.append(lineNumb)
#Print
i = 0
for path in sorted(setOfPaths):
print(angleBracket, " ", path, end=" Line ")
print(lineNumbers[i])
i += 1
if "*" in path:
print(" Element with difference has at least one sibling of the same name. Please manually compare both/all similar siblings." )
# end printPaths()
# ____________________________________________________________
# Find the line where the difference occurs
def getLineNumber(path, inputFileName, divergeIdx):
pathParts = path.split("/")
divergePart = pathParts[divergeIdx]
line1 = runGrep(divergePart, inputFileName)
# If grep returned multiple instances of the search term, check the previous term.
# The true line number should be the closest number >= the previous term's line | |
from itertools import product as it_product
from typing import List, Dict
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
-> [pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
    Consolidates data of all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
    # Run consolidate_data_for_analysis when new predictions or plans are added
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
no_consolidated_date = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
no_consolidated_date = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
if no_consolidated_date or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_solve_time = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_dvh_metrics = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
df_clinical_criteria = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
weights_list = []
weight_columns = []
# Iterate through each prediction in the list of prediction_names
for prediction in prediction_names:
# Make a dataloader that loads predicted dose distributions
prediction_paths = get_paths(f'{cs.prediction_dir}/{prediction}', ext='csv')
prediction_dose_loader = DataLoader(prediction_paths, mode_name='predicted_dose') # Set prediction loader
# Evaluate predictions and plans with respect to ground truth
dose_evaluator = EvaluateDose(patient_data_loader, prediction_dose_loader)
populate_error_dfs(dose_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
'Prediction')
# Make dataloader for plan dose distributions
for opt_name in optimizer_names:
print(opt_name)
# Get the paths of all optimized plans for prediction
cs.get_optimization_directories(prediction, opt_name)
weights_list, weight_columns = populate_weights_df(cs, weights_list)
populate_solve_time_df(cs, df_solve_time)
# Make data loader to load plan doses
plan_paths = get_paths(cs.plan_dose_from_pred_dir, ext='csv') # List of all plan dose paths
plan_dose_loader = DataLoader(plan_paths, mode_name='predicted_dose') # Set plan dose loader
plan_evaluator = EvaluateDose(patient_data_loader, plan_dose_loader) # Make evaluation object
# Ignore prediction name if no data exists, o/w populate DataFrames
if not patient_data_loader.file_paths_list:
print('No patient information was given to calculate metrics')
else:
# Evaluate prediction errors
populate_error_dfs(plan_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
opt_name)
# Clean up weights
weights_df = pd.DataFrame(weights_list, columns=weight_columns)
weights_df.set_index(['Objective', 'Structure', 'Patients', 'Dose_type', 'Prediction'], inplace=True)
weights_df = weights_df.unstack('Prediction')
# Save dose and DVH error DataFrames
df_dose_error.to_csv(consolidate_data_paths['dose'])
df_dvh_metrics.to_csv(consolidate_data_paths['dvh'])
df_clinical_criteria.to_csv(consolidate_data_paths['clinical_criteria'])
weights_df.to_csv(consolidate_data_paths['weights'])
df_solve_time.to_csv(consolidate_data_paths['solve_time'])
# Loads the DataFrames that contain consolidated data
df_dose_error = pd.read_csv(consolidate_data_paths['dose'], index_col=[0, 1])
df_dvh_metrics = pd.read_csv(consolidate_data_paths['dvh'], index_col=[0, 1, 2, 3])
df_clinical_criteria = pd.read_csv(consolidate_data_paths['clinical_criteria'], index_col=[0, 1, 2, 3])
df_ref_dvh_metrics = pd.read_csv(consolidate_data_paths['ref_dvh'], index_col=[0, 1, 2, 3], squeeze=True)
df_ref_dvh_metrics.index.set_names(df_dvh_metrics.index.names, inplace=True)
df_ref_clinical_criteria = pd.read_csv(consolidate_data_paths['ref_clinical_criteria'], index_col=[0, 1, 2, 3],
squeeze=True)
df_ref_clinical_criteria.index.set_names(df_clinical_criteria.index.names, inplace=True)
df_objective_data = pd.read_csv(consolidate_data_paths['weights'], index_col=[0, 1, 2, 3], header=[0, 1])
df_solve_time = pd.read_csv(consolidate_data_paths['solve_time'], index_col=[0, 1]).drop('Prediction', axis=0,
level=0)
# Adjust DVH metric signs to reflect direction of "better"
df_dvh_metrics.loc[:, :, ['D_95', 'D_99'], :] *= -1
df_clinical_criteria.loc[:, :, ['D_95', 'D_99'], :] *= -1
df_ref_dvh_metrics.loc[:, :, ['D_95', 'D_99'], :] *= -1
df_ref_clinical_criteria.loc[:, :, ['D_95', 'D_99'], :] *= -1
return df_dose_error, df_dvh_metrics, df_clinical_criteria, df_ref_dvh_metrics, df_ref_clinical_criteria, df_objective_data, df_solve_time
def make_error_and_metric_indices(patient_names: List[str], dose_evaluator_sample: EvaluateDose, optimizers: List[str]) \
-> [Dict, Dict]:
"""
Initialize the data frame indices for the dose error and DVH metric DataFrames
Args:
patient_names: list of patient names/identifiers
dose_evaluator_sample: A sample of the dose evaluator object that will be used during the processing stage
optimizers: list of optimizer names
Returns:
dose_error_dict: Dictionaries with stored indices (dose type, patients) for dose error
dvh_metric_dict: Dictionaries with stored indices (dose types, patients) for DVH metrics
"""
iterables = [['Prediction', *optimizers], patient_names, dose_evaluator_sample.metric_difference_df.columns]
iterables_with_tuple = list(it_product(*iterables))
iterables_new = []
for i in iterables_with_tuple:
iterables_new.append((i[0], i[1], i[2][0], i[2][1]))
dose_error_indices = [iterables[0], iterables[1]]
dvh_metric_indices = list(zip(*iterables_new))
# Set names
dose_error_dict = {'iterables': dose_error_indices, 'names': ["Dose_type", "Patients"]}
dvh_metric_dict = {'arrays': dvh_metric_indices, 'names': ["Dose_type", "Patients", "Metric", "Structure"]}
return dose_error_dict, dvh_metric_dict
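# A minimal sketch (assumed usage, not taken from the original code) of how the two dictionaries
# returned above could be turned into pandas indices for the empty summary DataFrames; the name
# 'predictions' below is a hypothetical list of prediction-model column labels.
# dose_error_index = pd.MultiIndex.from_product(dose_error_dict['iterables'], names=dose_error_dict['names'])
# dvh_metric_index = pd.MultiIndex.from_arrays(dvh_metric_dict['arrays'], names=dvh_metric_dict['names'])
# df_dose_error = pd.DataFrame(index=dose_error_index, columns=predictions)
# df_dvh_metrics = pd.DataFrame(index=dvh_metric_index, columns=predictions)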
def populate_error_dfs(evaluator: EvaluateDose, df_dose_error: pd.DataFrame, df_dvh_metrics: pd.DataFrame,
df_clinical_criteria: pd.DataFrame, prediction_name: str, dose_type: str):
"""
Populates the DataFrames that summarize dose errors, DVH metrics, and clinical criteria performance
Args:
evaluator: An EvaluateDose Object that will be summarized
df_dose_error: The DataFrame that contains dose errors
df_dvh_metrics: The DataFrame that contains DVH metrics
df_clinical_criteria: The DataFrame that contains clinical criteria performance
prediction_name: The name of the prediction model
dose_type: The type of dose (e.g., reference, prediction, optimization model)
"""
# Evaluate prediction errors
evaluator.make_metrics()
# Save collection of dose errors
dose_indices = evaluator.dose_score_vec.index
df_dose_error.loc[(dose_type, dose_indices), prediction_name] = evaluator.dose_score_vec[dose_indices].values
# Populate the DVH errors
evaluated_dvh_metrics = evaluator.melt_dvh_metrics(dose_type)
df_dvh_metrics.loc[evaluated_dvh_metrics.index, prediction_name] = evaluated_dvh_metrics.values
# Populate clinical criteria metrics
evaluated_clinical_criteria = evaluator.melt_dvh_metrics(dose_type, dose_metrics_att='new_criteria_metric_df')
df_clinical_criteria.loc[evaluated_clinical_criteria.index, prediction_name] = evaluated_clinical_criteria.values
def populate_weights_df(cs: ModelParameters, weights_list) -> [List, List]:
"""
Populates a list (weights_list) with data related to the cost function (e.g., structure, objective function values)
Args:
cs: Constants object
weights_list: List of weights that will be populated
Returns:
weights_list: List of populated weights
weights_list_column_headers: Column headers for list
"""
# Initialize information for plan weights
plan_weights_paths = get_paths(cs.plan_weights_from_pred_dir, ext='csv')
plan_weights_loader = DataLoader(plan_weights_paths, mode_name='plan_weights')
weights_list_column_headers = []
# Load weight info for each patient
for batch_idx in range(plan_weights_loader.number_of_batches()):
data_batch = plan_weights_loader.get_batch(batch_idx)
pt_id = data_batch['patient_list'][0]
plan_weights = data_batch['plan_weights'][0]
# Separate objective function from structure
roi_criteria_pairs = plan_weights.apply(lambda x: pd.Series(x['Objective'].split(' ', 1)), axis=1)
plan_weights['Structure'] = roi_criteria_pairs[0]
plan_weights['Objective'] = roi_criteria_pairs[1]
# Adjust plan weights DataFrame with plan/patient data
plan_weights['Patients'] = pt_id
plan_weights['Dose_type'] = cs.opt_name
plan_weights['Prediction'] = cs.prediction_name
# Extend weight data to weight list
weights_list.extend(plan_weights.values.tolist())
weights_list_column_headers = plan_weights.columns.to_list()
return weights_list, weights_list_column_headers
def populate_solve_time_df(cs: ModelParameters, df_solve_time: pd.DataFrame):
"""
Populates a DataFrame (df_solve_time) with data related to solve time and plan (optimization) gap
Args:
cs: Constants object
df_solve_time: DataFrame with solve time information
"""
# Initialize plan gap/solve time information
plan_gap_paths = get_paths(cs.plan_gap_from_pred_dir, ext='csv')
plan_gap_loader = DataLoader(plan_gap_paths, mode_name='plan_gap')
# Load solve time/gap for each patient
for batch_idx in range(plan_gap_loader.number_of_batches()):
data_batch = plan_gap_loader.get_batch(batch_idx)
pt_id = data_batch['patient_list'][0]
plan_gap = data_batch['plan_gap'][0]
# Populate summary dataframe with time/gap info
df_solve_time.loc[(cs.opt_name, pt_id), cs.prediction_name] = plan_gap['solve time']
def summarize_scores(cs: ModelParameters, df_errors: pd.DataFrame, name: str, level=0) -> pd.DataFrame:
"""
Summarize a DataFrame of errors into a mean score and rank for every dose type.
Args:
cs: Model constants
df_errors: DataFrame of errors that can be converted into a score by taking the average for every prediction/opt
name: Name of the score that will be generated
level: Level of df_errors that the average is calculated over
Returns:
ranked_scores: DataFrame of mean scores and ranks for each dose type, with columns ordered by prediction rank
"""
# Calculate scores
score = round(df_errors.mean(axis=0, level=level), 3)
score = score.loc[cs.optimization_short_hands_dict.keys()]
rank = score.rank(axis=1)
# Set order based on prediction rank
sorted_scores = score.sort_values(by='Prediction', axis=1).columns
# Rename index prior to concatenating the data
score.index = score.index.map(lambda x: f'{x} {name.lower()} score')
rank.index = rank.index.map(lambda x: f'{x} {name.lower()} rank')
# Concat scores and rank, ordered based on prediction rank
ranked_scores = pd.concat((score[sorted_scores], rank[sorted_scores])).T
# Alternate between score and rank
score_rank_column_order = []
for idx, dose_type in enumerate(score.index):
score_rank_column_order.extend(ranked_scores.columns[idx::len(score.index)].to_list())
ranked_scores = ranked_scores[score_rank_column_order]
# Convert ranks to integers
ranked_scores[score_rank_column_order[1::2]] = ranked_scores[score_rank_column_order[1::2]].astype(int)
# Prep ranked scores for table
ranked_scores_for_csv = ranked_scores.copy(deep=True)
ranked_scores_for_csv[score_rank_column_order[2::2]] = ranked_scores_for_csv[
score_rank_column_order[2::2]].subtract(ranked_scores_for_csv[f'Prediction {name.lower()} score'].values,
axis=0)
ranked_scores_for_csv.set_index(f'Prediction {name.lower()} rank', drop=False, inplace=True)
ranked_scores_for_csv[score_rank_column_order[1::2]] = ranked_scores_for_csv[
score_rank_column_order[1::2]].applymap(lambda x: f' ({str(x)})')
ranked_scores_for_csv[score_rank_column_order[0::2]] = ranked_scores_for_csv[
score_rank_column_order[0::2]].applymap(lambda x: f'{x:.2f}')
ranked_scores_for_csv[score_rank_column_order[0::2]] = ranked_scores_for_csv[score_rank_column_order[0::2]].values + \
ranked_scores_for_csv[score_rank_column_order[1::2]].values
ranked_scores_for_csv = ranked_scores_for_csv[score_rank_column_order[0::2]]
# Save as csv
ranked_scores_for_csv.to_csv(f'{cs.results_data_dir}/{name}.csv')
return ranked_scores
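# Hypothetical usage sketch (argument values are illustrative, not from the original code):
# averaging df_dose_error over patients for each dose type, ranking the prediction/optimizer
# columns, and writing '<results_data_dir>/Dose error.csv' would look roughly like
# dose_summary = summarize_scores(cs, df_dose_error, name='Dose error', level=0)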
def save_score_summary(cs: ModelParameters, df_errors:
ExecuteStatement_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'req', [TExecuteStatementReq, None], None, ), # 1
)
class ExecuteStatement_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TExecuteStatementResp()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ExecuteStatement_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ExecuteStatement_result)
ExecuteStatement_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TExecuteStatementResp, None], None, ), # 0
)
class GetTypeInfo_args(object):
"""
Attributes:
- req
"""
def __init__(self, req=None,):
self.req = req
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.req = TGetTypeInfoReq()
self.req.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetTypeInfo_args')
if self.req is not None:
oprot.writeFieldBegin('req', TType.STRUCT, 1)
self.req.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetTypeInfo_args)
GetTypeInfo_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'req', [TGetTypeInfoReq, None], None, ), # 1
)
class GetTypeInfo_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TGetTypeInfoResp()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetTypeInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetTypeInfo_result)
GetTypeInfo_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TGetTypeInfoResp, None], None, ), # 0
)
class GetCatalogs_args(object):
"""
Attributes:
- req
"""
def __init__(self, req=None,):
self.req = req
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.req = TGetCatalogsReq()
self.req.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetCatalogs_args')
if self.req is not None:
oprot.writeFieldBegin('req', TType.STRUCT, 1)
self.req.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetCatalogs_args)
GetCatalogs_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'req', [TGetCatalogsReq, None], None, ), # 1
)
class GetCatalogs_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TGetCatalogsResp()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetCatalogs_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetCatalogs_result)
GetCatalogs_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TGetCatalogsResp, None], None, ), # 0
)
class GetSchemas_args(object):
"""
Attributes:
- req
"""
def __init__(self, req=None,):
self.req = req
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.req = TGetSchemasReq()
self.req.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetSchemas_args')
if self.req is not None:
oprot.writeFieldBegin('req', TType.STRUCT, 1)
self.req.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetSchemas_args)
GetSchemas_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'req', [TGetSchemasReq, None], None, ), # 1
)
class GetSchemas_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TGetSchemasResp()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetSchemas_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetSchemas_result)
GetSchemas_result.thrift_spec = (
(0, TType.STRUCT, 'success', [TGetSchemasResp, None], None, ), # 0
)
class GetTables_args(object):
"""
Attributes:
- req
"""
def __init__(self, req=None,):
self.req = req
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.req = TGetTablesReq()
self.req.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetTables_args')
if self.req is not None:
oprot.writeFieldBegin('req', TType.STRUCT, 1)
self.req.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(GetTables_args)
GetTables_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'req', [TGetTablesReq, None], None, ), # 1
)
class GetTables_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TGetTablesResp()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GetTables_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
# -*- coding: utf-8 -*-
# vim:set et tabstop=4 shiftwidth=4 nu nowrap fileencoding=utf-8:
import unittest
from twisted.test.proto_helpers import StringTransport
from devicehive.gateway.binary import *
class PacketTests(unittest.TestCase):
def setUp(self):
self.pkt = Packet(PACKET_SIGNATURE, 2, 3, 4, '123')
def tearDown(self):
pass
def test_properties(self):
self.assertEquals(PACKET_SIGNATURE, self.pkt.signature, 'Signatures are not equal')
self.assertEquals(2, self.pkt.version, 'Versions are not equal')
self.assertEquals(3, self.pkt.flags, 'Flags are not equal')
self.assertEquals(4, self.pkt.intent, 'Intents are not equal')
def test_checksum(self):
self.assertEquals(0xd5, self.pkt.checksum, 'Invalid checksum')
def test_checksum2(self):
pkt = Packet(PACKET_SIGNATURE, 0x1, 0x0, 0x101, b'\x01\x02\x03\x04\x05\x06')
self.assertEquals(0x59, pkt.checksum)
def test_to_binary(self):
tstval = bytearray([PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5])
binval = self.pkt.to_binary()
self.assertEquals(tstval, binval, 'Invalid binary message has been formatted')
def test_from_binary(self):
pktcopy = Packet.from_binary(self.pkt.to_binary())
self.assertEquals(self.pkt.signature, pktcopy.signature)
self.assertEquals(self.pkt.version, pktcopy.version)
self.assertEquals(self.pkt.flags, pktcopy.flags)
self.assertEquals(self.pkt.intent, pktcopy.intent)
self.assertEquals(self.pkt.length, pktcopy.length)
self.assertEquals(self.pkt.data, pktcopy.data)
def test_crc_error(self):
tstval = bytearray([PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xBA])
try:
Packet.from_binary( str(tstval) )
self.assertTrue(False, 'from_binary method should raise InvalidCRCError')
except InvalidCRCError:
pass
def test_incomplete_packet(self):
tstval = bytearray([0, 1, 2, 3])
try:
Packet.from_binary( str(tstval) )
self.assertTrue(False, 'from_binary method should raise IncompletePacketError in case the data packet passed into this method is too small')
except IncompletePacketError:
pass
def test_invalid_packet_length(self):
tstval = bytearray([PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x00, 0x03, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5])
try:
Packet.from_binary( str(tstval) )
self.assertTrue(False, 'from_binary method should raise InvalidPacketLengthError in case there is not enough data passed into it')
except InvalidPacketLengthError:
pass
def test_raise_invalid_signature(self):
tstval = bytearray([0xBA, 0xD1, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5])
try:
Packet.from_binary( str(tstval) )
self.assertTrue(False, 'from_binary method should raise InvalidSignatureError in case the packet signature is incorrect')
except InvalidSignatureError:
pass
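# A minimal illustration (not part of the original test suite) of how the expected checksum
# values above (0xd5 and 0x59) can be reproduced by hand. Assumption: the checksum is the
# complement of the low byte of the sum of all preceding packet bytes, i.e. 0xFF - (sum & 0xFF).
def _example_checksum(packet_bytes_without_checksum):
    """Hypothetical helper, shown only to document the assumed checksum convention."""
    return 0xFF - (sum(bytearray(packet_bytes_without_checksum)) & 0xFF)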
class BinaryPacketBufferTests(unittest.TestCase):
def test_adding_normal_packet(self):
pkt = [PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5]
pkt_buff = BinaryPacketBuffer()
pkt_buff.append(pkt)
self.assertEquals( str(bytearray(pkt)), pkt_buff.data)
self.assertTrue(pkt_buff.has_packet())
def test_adding_partial_packet(self):
pkt = [PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5]
pkt_buff = BinaryPacketBuffer()
pkt_buff.append(pkt[:4])
pkt_buff.append(pkt[4:])
self.assertEquals( str(bytearray(pkt)), pkt_buff.data, 'One complete packet should be located in the buffer')
self.assertTrue(pkt_buff.has_packet())
def test_add_packet_prefixed_with_junk(self):
pkt = [0xBA, 0xDB, 0xAD, PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5]
pkt_buff = BinaryPacketBuffer()
pkt_buff.append(pkt[:6])
pkt_buff.append(pkt[6:])
self.assertEquals( str(bytearray(pkt[3:])), pkt_buff.data, 'Junk data should be skipped at the head of the packet buffer. {0} != {1}'.format(pkt[3:], pkt_buff.data))
self.assertTrue(pkt_buff.has_packet())
def test_onechar_junk_add(self):
pkt_buff = BinaryPacketBuffer()
pkt_buff.append([0])
pkt_buff.append([1])
pkt_buff.append([2])
self.assertEquals(0, len(pkt_buff.data), 'A single byte appended to an empty buffer should only be kept if it is the SIGNATURE_HI value')
self.assertFalse(pkt_buff.has_packet())
def test_invalid_signature(self):
pkt = [99, 98, 97, PACKET_SIGNATURE_HI, 96, PACKET_SIGNATURE_LO, 94, 93, PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5]
pkt_buff = BinaryPacketBuffer()
pkt_buff.append(pkt)
self.assertEquals( str(bytearray(pkt[8:])), pkt_buff.data, 'Buffer should start from the FULL frame signature')
self.assertTrue(pkt_buff.has_packet())
def test_inv_sign_last_signhi(self):
pkt = [99, 98, 97, PACKET_SIGNATURE_HI, 96, PACKET_SIGNATURE_LO, 94, 93, PACKET_SIGNATURE_HI]
pkt_buff = BinaryPacketBuffer()
pkt_buff.append(pkt)
self.assertEquals( str(bytearray([PACKET_SIGNATURE_HI])), pkt_buff.data, 'The last character should stay untouched if it is SIGNATURE_HI')
self.assertFalse(pkt_buff.has_packet())
def test_signature_byteatatime(self):
pkt = [99, 98, 97, PACKET_SIGNATURE_HI, 96, PACKET_SIGNATURE_LO, 94, 93, PACKET_SIGNATURE_HI, PACKET_SIGNATURE_LO, 0x02, 0x03, 0x03, 0x00, 0x04, 0x00, 0x31, 0x32, 0x33, 0xd5]
pkt_buff = BinaryPacketBuffer()
for byte in pkt:
pkt_buff.append([byte])
self.assertEquals(str(bytearray(pkt[8:])), pkt_buff.data, 'Even if we add the packet one byte at a time, the buffer should start from the FULL frame signature')
self.assertTrue(pkt_buff.has_packet())
class _TestObject(object):
class _SubObject(object):
def __init__(self, val = 0):
self._val = val
def _set_val(self, value):
self._val = value
sword_prop = binary_property(DATA_TYPE_SWORD, fget = lambda self : self._val, fset = _set_val)
__binary_struct__ = [sword_prop]
def __init__(self):
self._byte_prop = 0
self._word_prop = 0
self._dword_prop = 0
self._bool_prop = False
self._false_prop = False
self._str_prop = ''
self.arr_prop = []
self.guid_prop = uuid.uuid1()
self.aguid_prop = (uuid.uuid1()).bytes
def gen_props(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
return {'fget': fget, 'fset': fset}
byte_prop = binary_property(DATA_TYPE_BYTE, **gen_props('_byte_prop'))
word_prop = binary_property(DATA_TYPE_WORD, **gen_props('_word_prop'))
dword_prop = binary_property(DATA_TYPE_DWORD, **gen_props('_dword_prop'))
bool_prop = binary_property(DATA_TYPE_BOOL, **gen_props('_bool_prop'))
false_prop = binary_property(DATA_TYPE_BOOL, **gen_props('_false_prop'))
str_prop = binary_property(DATA_TYPE_STRING, **gen_props('_str_prop'))
arr_prop = array_binary_property(ArrayQualifier(_SubObject), **gen_props('_arr_prop'))
guid_prop = binary_property(DATA_TYPE_GUID, **gen_props('_guid_prop'))
aguid_prop = binary_property(DATA_TYPE_GUID, **gen_props('_aguid_prop'))
__binary_struct__ = (byte_prop, word_prop, dword_prop, bool_prop, false_prop, str_prop, arr_prop, guid_prop, aguid_prop)
class BinaryFormatterTest(unittest.TestCase):
def _create_default_test_object(self):
res = _TestObject()
res.byte_prop = 0xab
res.word_prop = 0xabcd
res.dword_prop = 0x12345678
res.bool_prop = True
res.false_prop = False
res.str_prop = 'abc'
res.arr_prop = (_TestObject._SubObject(-1024), _TestObject._SubObject(-8192))
res.guid_prop = uuid.UUID('fa8a9d6e-6555-11e2-89b8-e0cb4eb92129')
res.aguid_prop = res.guid_prop.bytes
return res
def setUp(self):
self.binary = bytearray([0xab,
0xcd, 0xab,
0x78, 0x56, 0x34, 0x12,
0x01,
0x00,
0x03, 0x00, ord('a'), ord('b'), ord('c'),
0x02, 0x00, 0x00, 0xfc, 0x00, 0xe0,
0xfa, 0x8a, 0x9d, 0x6e, 0x65, 0x55, 0x11, 0xe2, 0x89, 0xb8, 0xe0, 0xcb, 0x4e, 0xb9, 0x21, 0x29,
0xfa, 0x8a, 0x9d, 0x6e, 0x65, 0x55, 0x11, 0xe2, 0x89, 0xb8, 0xe0, 0xcb, 0x4e, 0xb9, 0x21, 0x29])
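# Reading of the fixture bytes above (assumed wire layout, matching _create_default_test_object):
# 0xab -> byte_prop; 0xcd 0xab -> word_prop 0xabcd (little-endian); 0x78 0x56 0x34 0x12 -> dword_prop
# 0x12345678; 0x01/0x00 -> bool_prop/false_prop; 0x03 0x00 'a' 'b' 'c' -> length-prefixed str_prop;
# 0x02 0x00 followed by two little-endian signed words (-1024, -8192) -> arr_prop; then two 16-byte
# blocks -> guid_prop and aguid_prop.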
def test_serialize_byte(self):
obj = self._create_default_test_object()
binstr = BinaryFormatter.serialize(obj)
self.assertEquals(self.binary, binstr)
def test_deserializer(self):
res = BinaryFormatter.deserialize(self.binary, _TestObject)
self.assertEquals(0xab, res.byte_prop)
self.assertEquals(0xabcd, res.word_prop)
self.assertEquals(0x12345678, res.dword_prop)
self.assertTrue(res.bool_prop)
self.assertFalse(res.false_prop)
self.assertEquals('abc', res.str_prop)
self.assertEquals(2, len(res.arr_prop))
self.assertEquals(-1024, res.arr_prop[0].sword_prop)
self.assertEquals(-8192, res.arr_prop[1].sword_prop)
guid = uuid.UUID('fa8a9d6e-6555-11e2-89b8-e0cb4eb92129')
self.assertEquals(guid, res.guid_prop)
self.assertEquals(guid, res.aguid_prop)
def test_deserialize_array_prop_invalid_definition(self):
class _InvalidDefObject(object):
def __init__(self, val = 0):
self._val = val
def _set_val(self, value):
self._val = value
invalid_array_prop = binary_property(DATA_TYPE_ARRAY, fget = lambda self : self._val, fset = _set_val)
__binary_struct__ = [invalid_array_prop]
invalidbin = bytearray([0x01, 0x00, 0xff])
try :
res = BinaryFormatter.deserialize(invalidbin, _InvalidDefObject)
self.assertTrue(False, 'Deserialization should raise an exception on an attempt to deserialize an invalidly defined object')
except BinaryDeserializationError:
pass
def test_deserialize_register2(self):
payload = b'{"id":"fa8a9d6e-6555-11e2-89b8-e0cb4eb92129","key":"DEVICE_KEY","name":"DEVICE_NAME","deviceClass":{"name":"DEVICE_CLASS_NAME","version":"DEVICE_CLASS_VERSION"},"equipment":[{"code":"LED_EQP_CODE","name":"LED_EQP_NAME","type":"LED_EQP_TYPE"},{"code":"BTN_EQP_CODE","name":"BTN_EQP_NAME","type":"BTN_EQP_TYPE"}],"commands":[{"intent":257,"name":"UpdateLedState","params":{"equipment":"str","state":"bool"}}],"notifications":[{"intent":256,"name":"equipment","params":{"equipment":"str","state":"bool"}}]}'
obj = BinaryFormatter.deserialize_register2(payload)
self.assertEquals(uuid.UUID('fa8a9d6e-6555-11e2-89b8-e0cb4eb92129'), obj.device_id)
self.assertEquals(u'DEVICE_KEY', obj.device_key)
self.assertEquals(u'DEVICE_NAME', obj.device_name)
self.assertEquals(u'DEVICE_CLASS_NAME', obj.device_class_name)
self.assertEquals(u'DEVICE_CLASS_VERSION', obj.device_class_version)
# equipment
self.assertEquals(2, len(obj.equipment))
self.assertEquals(u'LED_EQP_CODE', obj.equipment[0].code)
self.assertEquals(u'LED_EQP_NAME', obj.equipment[0].name)
self.assertEquals(u'LED_EQP_TYPE', obj.equipment[0].typename)
self.assertEquals(u'BTN_EQP_CODE', obj.equipment[1].code)
self.assertEquals(u'BTN_EQP_NAME', obj.equipment[1].name)
self.assertEquals(u'BTN_EQP_TYPE', obj.equipment[1].typename)
# command
self.assertEquals(1, len(obj.commands))
self.assertEquals(257, obj.commands[0].intent)
self.assertEquals(u'UpdateLedState', obj.commands[0].name)
self.assertEquals(2, len(obj.commands[0].parameters))
self.assertEquals(u'equipment', obj.commands[0].parameters[0].name)
self.assertEquals(DATA_TYPE_STRING, obj.commands[0].parameters[0].type)
self.assertEquals(u'state', obj.commands[0].parameters[1].name)
self.assertEquals(DATA_TYPE_BOOL, obj.commands[0].parameters[1].type)
# notifications
self.assertEquals(1, len(obj.notifications))
self.assertEquals(256, obj.notifications[0].intent)
self.assertEquals(u'equipment', obj.notifications[0].name)
self.assertEquals(2, len(obj.notifications[0].parameters))
self.assertEquals(u'equipment', obj.notifications[0].parameters[0].name)
self.assertEquals(DATA_TYPE_STRING, obj.notifications[0].parameters[0].type)
self.assertEquals(u'state', obj.notifications[0].parameters[1].name)
self.assertEquals(DATA_TYPE_BOOL, obj.notifications[0].parameters[1].type)
def test_deserialize_complex_array(self) :
payload = b'{"id":"fa8a9d6e-6555-11e2-89b8-e0cb4eb92129","key":"1","name":"2","deviceClass":{"name":"3","version":"4"},"equipment":[{"code":"5","name":"6","type":"7"}],"commands":[{"intent":257,"name":"7","params":{"e":"str","state":"bool"}}],"notifications":[{"intent":300,"name":"equipment","params":{"array_prop":["str"]}}]}'
obj = BinaryFormatter.deserialize_register2(payload)
self.assertEquals(300, obj.notifications[0].intent)
# test array property
prop = obj.notifications[0].parameters[0]
self.assertEquals(DATA_TYPE_ARRAY, prop.type)
self.assertTrue(isinstance(prop.qualifier, ArrayQualifier))
self.assertEquals(DATA_TYPE_STRING, prop.qualifier.data_type)
def test_deserialize_complex_obj(self) :
payload = b'{"id":"fa8a9d6e-6555-11e2-89b8-e0cb4eb92129","key":"1","name":"2","deviceClass":{"name":"3","version":"4"},"equipment":[{"code":"5","name":"6","type":"7"}],"commands":[{"intent":257,"name":"7","params":{"e":"str","state":"bool"}}],"notifications":[{"intent":300,"name":"equipment","params":{"obj_prop":{"str_prop":"str"}}}]}'
obj = BinaryFormatter.deserialize_register2(payload)
self.assertEquals(300, obj.notifications[0].intent)
prop = obj.notifications[0].parameters[0]
self.assertEquals(u'obj_prop', prop.name)
self.assertEquals(DATA_TYPE_OBJECT, prop.type)
self.assertTrue(hasattr(prop.qualifier, 'str_prop'))
self.assertTrue(isinstance(prop.qualifier.str_prop, binary_property))
self.assertEquals(DATA_TYPE_STRING, prop.qualifier.str_prop.type)
def test_deserialize_complex_array_obj(self):
payload = b'{"id":"fa8a9d6e-6555-11e2-89b8-e0cb4eb92129","key":"1","name":"2","deviceClass":{"name":"3","version":"4"},"equipment":[{"code":"5","name":"6","type":"7"}],"commands":[{"intent":257,"name":"7","params":{"e":"str","state":"bool"}}],"notifications":[{"intent":300,"name":"equipment","params":{"array_prop":[{"str_prop":"str"}]}}]}'
obj = BinaryFormatter.deserialize_register2(payload)
prop = obj.notifications[0].parameters[0]
self.assertEquals(u'array_prop', prop.name)
self.assertEquals(DATA_TYPE_ARRAY, prop.type)
self.assertTrue( isinstance(prop.qualifier, ArrayQualifier) )
self.assertFalse(prop.qualifier.data_type is None)
self.assertTrue(hasattr(prop.qualifier.data_type, 'str_prop'))
self.assertTrue(isinstance(prop.qualifier.data_type.str_prop, binary_property))
self.assertEquals(DATA_TYPE_STRING, prop.qualifier.data_type.str_prop.type)
def test_deserialize_complex_array_array_obj_array(self) :
payload = b'{"id":"fa8a9d6e-6555-11e2-89b8-e0cb4eb92129","key":"1","name":"2","deviceClass":{"name":"3","version":"4"},"equipment":[{"code":"5","name":"6","type":"7"}],"commands":[{"intent":257,"name":"7","params":{"e":"str","state":"bool"}}],"notifications":[{"intent":300,"name":"equipment","params":{"array_prop":[[{"array_prop":["str"]}]]}}]}'
obj = BinaryFormatter.deserialize_register2(payload)
prop = obj.notifications[0].parameters[0]
self.assertEquals(u'array_prop', prop.name)
self.assertEquals(DATA_TYPE_ARRAY, prop.type)
self.assertTrue(isinstance(prop.qualifier, ArrayQualifier))
self.assertTrue(isinstance(prop.qualifier.data_type, ArrayQualifier))
self.assertTrue(prop.qualifier.data_type.is_object())
#
objdescr = prop.qualifier.data_type.data_type
self.assertTrue (hasattr(objdescr, 'array_prop'))
self.assertEquals(DATA_TYPE_ARRAY, objdescr.array_prop.type)
self.assertTrue( isinstance(objdescr.array_prop.qualifier, ArrayQualifier) )
self.assertEquals(DATA_TYPE_STRING, objdescr.array_prop.qualifier.data_type)
def test_complex_object(self) :
class _Tmp(object):
class _SubTmp(object) :
sub_byte_property = binary_property(DATA_TYPE_BYTE)
__binary_struct__ = (sub_byte_property,)
byte_property = binary_property(DATA_TYPE_BYTE)
obj_property = object_binary_property(_SubTmp)
a1_property = array_binary_property(ArrayQualifier( DATA_TYPE_BYTE ))
a2_property = array_binary_property(ArrayQualifier( _SubTmp ))
a3_property = array_binary_property(ArrayQualifier( ArrayQualifier(DATA_TYPE_BYTE) ))
a4_property = array_binary_property(ArrayQualifier( ArrayQualifier(_SubTmp) ))
__binary_struct__ = (byte_property, obj_property, a1_property, a2_property, a3_property, a4_property)
# initialize class properties
t = _Tmp()
t.byte_property = 125
t.obj_property = _Tmp._SubTmp()
t.obj_property.sub_byte_property = 100
t.a1_property = (1, 2, 3)
t.a2_property = (_Tmp._SubTmp(), _Tmp._SubTmp())
t.a2_property[0].sub_byte_property = 50
t.a2_property[1].sub_byte_property = 60
t.a3_property = (ArrayContainer(DATA_TYPE_BYTE, [1, 2]), ArrayContainer(DATA_TYPE_BYTE, [3, 4]))
t.a4_property = (ArrayContainer(_Tmp._SubTmp, [_Tmp._SubTmp()]),)
t.a4_property[0][0].sub_byte_property = 70
bin = BinaryFormatter.serialize(t)
self.assertEquals(bytearray([125, 100, 0x03, 0x00, 0x01, 0x02, 0x03, 0x02, 0x00, 50, 60, 0x02, 0x00, 0x02, 0x00, 1, 2, 0x02, 0x00, 3, 4, 0x01, 0x00, 0x01, 0x00, 70]), bin)
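# Reading of the expected byte string asserted above (assumed wire layout, with little-endian
# two-byte length prefixes):
#   125 -> byte_property; 100 -> obj_property.sub_byte_property
#   0x03 0x00, 1 2 3 -> a1_property: length 3, then three byte elements
#   0x02 0x00, 50 60 -> a2_property: two one-byte sub-objects
#   0x02 0x00, (0x02 0x00, 1 2) (0x02 0x00, 3 4) -> a3_property: two inner byte arrays
#   0x01 0x00, (0x01 0x00, 70) -> a4_property: one inner array holding one sub-object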
class BinaryConstructableTest(unittest.TestCase):
class _ElementType(object) :
sub_property1 = binary_property(DATA_TYPE_BOOL)
__binary_struct__ = (sub_property1, )
def setUp(self):
params = (Parameter(DATA_TYPE_WORD, 'property1'),
Parameter(DATA_TYPE_BYTE, 'property2'),
Parameter(DATA_TYPE_ARRAY, 'property3', ArrayQualifier(DATA_TYPE_BYTE)),
Parameter(DATA_TYPE_OBJECT, 'property4', BinaryConstructableTest._ElementType))
self.cmd = Command(intent = 100, name = 'CommandName', parameters = params)
def test_descriptor(self):
cls = self.cmd.descriptor()
self.assertTrue(issubclass(cls, ToDictionary))
self.assertTrue(hasattr(cls, 'property1'))
self.assertTrue(isinstance(cls.property1, binary_property))
self.assertTrue(hasattr(cls, 'property2'))
self.assertTrue(isinstance(cls.property2, binary_property))
self.assertTrue(hasattr(cls, 'property3'))
self.assertTrue(isinstance(cls.property3, array_binary_property))
self.assertTrue(isinstance(cls.property3.qualifier, ArrayQualifier))
self.assertEquals(DATA_TYPE_BYTE, cls.property3.qualifier.data_type)
self.assertTrue(hasattr(cls, 'property4'))
self.assertTrue(isinstance(cls.property4, object_binary_property))
self.assertEquals(BinaryConstructableTest._ElementType, cls.property4.qualifier)
self.assertTrue(hasattr(cls.property4.qualifier, 'sub_property1'))
def test_top_level_scalar(self):
pass
class ToDictionaryTest(unittest.TestCase):
def setUp(self) :
class _Test(ToDictionary) :
class _SubTyp(ToDictionary) :
i8_prop = binary_property(DATA_TYPE_SBYTE)
__binary_struct__ = (i8_prop,)
u8_prop = binary_property(DATA_TYPE_BYTE)
obj_prop = object_binary_property(_SubTyp)
ab_prop = array_binary_property( ArrayQualifier(DATA_TYPE_WORD) )
ao_prop = array_binary_property( ArrayQualifier(_SubTyp) )
aa_prop = array_binary_property( ArrayQualifier(ArrayQualifier(DATA_TYPE_BYTE)) )
__binary_struct__ = (u8_prop, obj_prop, ab_prop, ao_prop, aa_prop)
#
self.obj = _Test()
# another factor of two?
i = es.countiter % N
try:
self._addstop('noeffectaxis',
np.sum(es.mean == es.mean + 0.1 * es.sigma *
es.sm.D[i] * es.sigma_vec.scaling *
(es.sm.B[:, i] if len(es.sm.B.shape) > 1 else es.sm.B[0])) == N)
except AttributeError:
pass
self._addstop('tolconditioncov',
opts['tolconditioncov'] and
es.D[-1] > opts['tolconditioncov']**0.5 * es.D[0], opts['tolconditioncov'])
self._addstop('callback', any(es.callbackstop), es.callbackstop) # termination_callback
if 1 < 3 or len(self): # only if another termination criterion is satisfied
if 1 < 3:
if es.fit.fit[0] < es.fit.fit[int(0.75 * es.popsize)]:
es.fit.flatfit_iterations = 0
else:
# print(es.fit.fit)
es.fit.flatfit_iterations += 1
if (es.fit.flatfit_iterations > opts['tolflatfitness'] # or
# mainly for historical reasons:
# max(es.fit.hist[:1 + int(opts['tolflatfitness'])]) == min(es.fit.hist[:1 + int(opts['tolflatfitness'])])
):
self._addstop('tolflatfitness')
if 11 < 3 and max(es.fit.fit) == min(es.fit.fit) == es.best.last.f: # keep warning for historical reasons for the time being
utils.print_warning(
"flat fitness (f=%f, sigma=%.2e). "
"For small sigma, this could indicate numerical convergence. \n"
"Otherwise, please (re)consider how to compute the fitness more elaborately." %
(es.fit.fit[0], es.sigma), iteration=es.countiter)
if 11 < 3: # add stop condition, in case, replaced by above, subject to removal
self._addstop('flat fitness', # message via stopdict
len(es.fit.hist) > 9 and
max(es.fit.hist) == min(es.fit.hist) and
max(es.fit.fit) == min(es.fit.fit),
"please (re)consider how to compute the fitness more elaborately if sigma=%.2e is large" % es.sigma)
if 11 < 3 and opts['vv'] == 321:
self._addstop('||xmean||^2<ftarget', sum(es.mean**2) <= opts['ftarget'])
return self
def _addstop(self, key, cond=True, val=None):
if key == self._get_value:
self._value = val
self._get_value = None
elif cond:
self.stoplist.append(key) # can have the same key twice
self[key] = val if val is not None \
else self.opts.get(key, None)
def clear(self):
"""empty the stopdict"""
for k in list(self):
self.pop(k)
self.stoplist = []
class _CMAParameters(object):
"""strategy parameters like population size and learning rates.
Note:
contrary to `CMAOptions`, `_CMAParameters` is not (yet) part of the
"user-interface" and subject to future changes (it might become
a `collections.namedtuple`)
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1) #doctest: +ELLIPSIS
(6_w,12)-aCMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=...)
>>>
>>> type(es.sp) # sp contains the strategy parameters
<class 'cma.evolution_strategy._CMAParameters'>
>>> es.sp.disp() #doctest: +ELLIPSIS
{'CMA_on': True,
'N': 20,
'c1': 0.00437235...,
'c1_sep': 0.0343279...,
'cc': 0.171767...,
'cc_sep': 0.252594...,
'cmean': array(1...,
'cmu': 0.00921656...,
'cmu_sep': 0.0565385...,
'lam_mirr': 0,
'mu': 6,
'popsize': 12,
'weights': [0.4024029428...,
0.2533890840...,
0.1662215645...,
0.1043752252...,
0.05640347757...,
0.01720770576...,
-0.05018713636...,
-0.1406167894...,
-0.2203813963...,
-0.2917332686...,
-0.3562788884...,
-0.4152044225...]}
>>>
:See: `CMAOptions`, `CMAEvolutionStrategy`
"""
def __init__(self, N, opts, ccovfac=1, verbose=True):
"""Compute strategy parameters, mainly depending on
dimension and population size, by calling `set`
"""
self.N = N
if ccovfac == 1:
ccovfac = opts['CMA_on'] # that's a hack
self.popsize = None # declaring the attribute, not necessary though
self.set(opts, ccovfac=ccovfac, verbose=verbose)
def set(self, opts, popsize=None, ccovfac=1, verbose=True):
"""Compute strategy parameters as a function
of dimension and population size """
limit_fac_cc = 4.0 # in future: 10**(1 - N**-0.33)?
def conedf(df, mu, N):
"""used for computing separable learning rate"""
return 1. / (df + 2. * np.sqrt(df) + float(mu) / N)
def cmudf(df, mu, alphamu):
"""used for computing separable learning rate"""
return (alphamu + mu + 1. / mu - 2) / (df + 4 * np.sqrt(df) + mu / 2.)
sp = self # mainly for historical reasons
N = sp.N
if popsize:
opts.evalall({'N':N, 'popsize':popsize})
else:
popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in CMAOptions()
## meta_parameters.lambda_exponent == 0.0
popsize = int(popsize + N** 0.0 - 1)
# set weights
sp.weights = RecombinationWeights(popsize)
if opts['CMA_mu']:
sp.weights = RecombinationWeights(2 * opts['CMA_mu'])
while len(sp.weights) < popsize:
sp.weights.insert(sp.weights.mu, 0.0)
if utils.is_(opts['CMA_recombination_weights']):
sp.weights[:] = opts['CMA_recombination_weights']
sp.weights.set_attributes_from_weights()
popsize = len(sp.weights)
# weights.finalize_negative_weights will be called below
sp.popsize = popsize
sp.mu = sp.weights.mu # not used anymore but for the record
if opts['CMA_mirrors'] < 0.5:
sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
elif opts['CMA_mirrors'] > 1:
sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
else:
sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... * popsize is optimal
# lam = arange(2,22)
# mirr = 0.16 + 0.29/lam
# print(lam); print([int(0.5 + l) for l in mirr*lam])
# [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]
# [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]
# in principle we have mu_opt = popsize/2 + lam_mirr/2,
# which means in particular weights should only be negative for q > 0.5+mirr_frac/2
if sp.popsize // 2 > sp.popsize - 2 * sp.lam_mirr + 1:
utils.print_warning("pairwise selection is not implemented, therefore " +
" mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
sp.popsize // 2, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
if sp.lam_mirr > sp.popsize // 2:
raise ValueError("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " +
"theoretically optimal is 0.159")
mueff = sp.weights.mueff
# line 3415
## meta_parameters.cc_exponent == 1.0
b = 1.0
## meta_parameters.cc_multiplier == 1.0
sp.cc = 1.0 * (limit_fac_cc + mueff / N)**b / \
(N**b + (limit_fac_cc + 2 * mueff / N)**b)
sp.cc_sep = (1 + 1 / N + mueff / N) / \
(N**0.5 + 1 / N + 2 * mueff / N)
if hasattr(opts['vv'], '__getitem__'):
if 'sweep_ccov1' in opts['vv']:
sp.cc = 1.0 * (4 + mueff / N)**0.5 / ((N + 4)**0.5 +
(2 * mueff / N)**0.5)
if 'sweep_cc' in opts['vv']:
sp.cc = opts['vv']['sweep_cc']
sp.cc_sep = sp.cc
print('cc is %f' % sp.cc)
## meta_parameters.c1_multiplier == 1.0
sp.c1 = (1.0 * opts['CMA_rankone'] * ccovfac * min(1, sp.popsize / 6) *
## meta_parameters.c1_exponent == 2.0
2 / ((N + 1.3)** 2.0 + mueff))
# 2 / ((N + 1.3)** 1.5 + mueff)) # TODO
# 2 / ((N + 1.3)** 1.75 + mueff)) # TODO
# 1/0
sp.c1_sep = opts['CMA_rankone'] * ccovfac * conedf(N, mueff, N)
if 11 < 3:
sp.c1 = 0.
print('c1 is zero')
if utils.is_(opts['CMA_rankmu']): # also empty
## meta_parameters.cmu_multiplier == 2.0
alphacov = 2.0
## meta_parameters.rankmu_offset == 0.25
rankmu_offset = 0.25
# the influence of rankmu_offset in [0, 1] on performance is
# barely visible
if hasattr(opts['vv'], '__getitem__') and 'sweep_rankmu_offset' in opts['vv']:
rankmu_offset = opts['vv']['sweep_rankmu_offset']
print("rankmu_offset = %.2f" % rankmu_offset)
mu = mueff
sp.cmu = min(1 - sp.c1,
opts['CMA_rankmu'] * ccovfac * alphacov *
# simpler nominator would be: (mu - 0.75)
(rankmu_offset + mu + 1 / mu - 2) /
## meta_parameters.cmu_exponent == 2.0
((N + 2)** 2.0 + alphacov * mu / 2))
# ((N + 2)** 1.5 + alphacov * mu / 2)) # TODO
# ((N + 2)** 1.75 + alphacov * mu / 2)) # TODO
# cmu -> 1 for mu -> N**2 * (2 / alphacov)
if hasattr(opts['vv'], '__getitem__') and 'sweep_ccov' in opts['vv']:
sp.cmu = opts['vv']['sweep_ccov']
sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, mueff, rankmu_offset))
else:
sp.cmu = sp.cmu_sep = 0
if hasattr(opts['vv'], '__getitem__') and 'sweep_ccov1' in opts['vv']:
sp.c1 = opts['vv']['sweep_ccov1']
if any(w < 0 for w in sp.weights):
if opts['CMA_active'] and opts['CMA_on'] and opts['CMA_rankmu']:
sp.weights.finalize_negative_weights(N, sp.c1, sp.cmu)
# this is re-done using self.sm.parameters()['c1']...
else:
sp.weights.zero_negative_weights()
# line 3834
sp.CMA_on = sp.c1 + sp.cmu > 0
# print(sp.c1_sep / sp.cc_sep)
if not opts['CMA_on'] and opts['CMA_on'] not in (None, [], (), ''):
sp.CMA_on = False
# sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
# line 3480
if 11 < 3:
# this is worse than damps = 1 + sp.cs for the (1,10000)-ES on 40D parabolic ridge
sp.damps = 0.3 + 2 * max([mueff / sp.popsize, ((mueff - 1) / (N + 1))**0.5 - 1]) + sp.cs
if 11 < 3:
# this does not work for lambda = 4*N^2 on the parabolic ridge
sp.damps = opts['CSA_dampfac'] * (2 - 0 * sp.lam_mirr / sp.popsize) * mueff / sp.popsize + 0.3
self.zz_fdict['TXUF'] = self.TXUF
self.RXDONE = RM_Field_FRC_SEQIEN_RXDONE(self)
self.zz_fdict['RXDONE'] = self.RXDONE
self.RXABORTED = RM_Field_FRC_SEQIEN_RXABORTED(self)
self.zz_fdict['RXABORTED'] = self.RXABORTED
self.FRAMEERROR = RM_Field_FRC_SEQIEN_FRAMEERROR(self)
self.zz_fdict['FRAMEERROR'] = self.FRAMEERROR
self.BLOCKERROR = RM_Field_FRC_SEQIEN_BLOCKERROR(self)
self.zz_fdict['BLOCKERROR'] = self.BLOCKERROR
self.RXOF = RM_Field_FRC_SEQIEN_RXOF(self)
self.zz_fdict['RXOF'] = self.RXOF
self.WCNTCMP0 = RM_Field_FRC_SEQIEN_WCNTCMP0(self)
self.zz_fdict['WCNTCMP0'] = self.WCNTCMP0
self.WCNTCMP1 = RM_Field_FRC_SEQIEN_WCNTCMP1(self)
self.zz_fdict['WCNTCMP1'] = self.WCNTCMP1
self.WCNTCMP2 = RM_Field_FRC_SEQIEN_WCNTCMP2(self)
self.zz_fdict['WCNTCMP2'] = self.WCNTCMP2
self.ADDRERROR = RM_Field_FRC_SEQIEN_ADDRERROR(self)
self.zz_fdict['ADDRERROR'] = self.ADDRERROR
self.BUSERROR = RM_Field_FRC_SEQIEN_BUSERROR(self)
self.zz_fdict['BUSERROR'] = self.BUSERROR
self.RXRAWEVENT = RM_Field_FRC_SEQIEN_RXRAWEVENT(self)
self.zz_fdict['RXRAWEVENT'] = self.RXRAWEVENT
self.TXRAWEVENT = RM_Field_FRC_SEQIEN_TXRAWEVENT(self)
self.zz_fdict['TXRAWEVENT'] = self.TXRAWEVENT
self.SNIFFOF = RM_Field_FRC_SEQIEN_SNIFFOF(self)
self.zz_fdict['SNIFFOF'] = self.SNIFFOF
self.WCNTCMP3 = RM_Field_FRC_SEQIEN_WCNTCMP3(self)
self.zz_fdict['WCNTCMP3'] = self.WCNTCMP3
self.WCNTCMP4 = RM_Field_FRC_SEQIEN_WCNTCMP4(self)
self.zz_fdict['WCNTCMP4'] = self.WCNTCMP4
self.BOISET = RM_Field_FRC_SEQIEN_BOISET(self)
self.zz_fdict['BOISET'] = self.BOISET
self.PKTBUFSTART = RM_Field_FRC_SEQIEN_PKTBUFSTART(self)
self.zz_fdict['PKTBUFSTART'] = self.PKTBUFSTART
self.PKTBUFTHRESHOLD = RM_Field_FRC_SEQIEN_PKTBUFTHRESHOLD(self)
self.zz_fdict['PKTBUFTHRESHOLD'] = self.PKTBUFTHRESHOLD
self.RXRAWOF = RM_Field_FRC_SEQIEN_RXRAWOF(self)
self.zz_fdict['RXRAWOF'] = self.RXRAWOF
self.FRAMEDETPAUSED = RM_Field_FRC_SEQIEN_FRAMEDETPAUSED(self)
self.zz_fdict['FRAMEDETPAUSED'] = self.FRAMEDETPAUSED
self.INTERLEAVEWRITEPAUSED = RM_Field_FRC_SEQIEN_INTERLEAVEWRITEPAUSED(self)
self.zz_fdict['INTERLEAVEWRITEPAUSED'] = self.INTERLEAVEWRITEPAUSED
self.INTERLEAVEREADPAUSED = RM_Field_FRC_SEQIEN_INTERLEAVEREADPAUSED(self)
self.zz_fdict['INTERLEAVEREADPAUSED'] = self.INTERLEAVEREADPAUSED
self.TXSUBFRAMEPAUSED = RM_Field_FRC_SEQIEN_TXSUBFRAMEPAUSED(self)
self.zz_fdict['TXSUBFRAMEPAUSED'] = self.TXSUBFRAMEPAUSED
self.CONVPAUSED = RM_Field_FRC_SEQIEN_CONVPAUSED(self)
self.zz_fdict['CONVPAUSED'] = self.CONVPAUSED
self.RXWORD = RM_Field_FRC_SEQIEN_RXWORD(self)
self.zz_fdict['RXWORD'] = self.RXWORD
self.TXWORD = RM_Field_FRC_SEQIEN_TXWORD(self)
self.zz_fdict['TXWORD'] = self.TXWORD
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_WCNTCMP3(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_WCNTCMP3, self).__init__(rmio, label,
0xa8004000, 0x0BC,
'WCNTCMP3', 'FRC.WCNTCMP3', 'read-write',
u"",
0x00000000, 0x00000FFF,
0x00001000, 0x00002000,
0x00003000)
self.SUPPLENFIELDLOC = RM_Field_FRC_WCNTCMP3_SUPPLENFIELDLOC(self)
self.zz_fdict['SUPPLENFIELDLOC'] = self.SUPPLENFIELDLOC
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_BOICTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_BOICTRL, self).__init__(rmio, label,
0xa8004000, 0x0C0,
'BOICTRL', 'FRC.BOICTRL', 'read-write',
u"",
0x00000000, 0x0001FFFF,
0x00001000, 0x00002000,
0x00003000)
self.BOIEN = RM_Field_FRC_BOICTRL_BOIEN(self)
self.zz_fdict['BOIEN'] = self.BOIEN
self.BOIFIELDLOC = RM_Field_FRC_BOICTRL_BOIFIELDLOC(self)
self.zz_fdict['BOIFIELDLOC'] = self.BOIFIELDLOC
self.BOIBITPOS = RM_Field_FRC_BOICTRL_BOIBITPOS(self)
self.zz_fdict['BOIBITPOS'] = self.BOIBITPOS
self.BOIMATCHVAL = RM_Field_FRC_BOICTRL_BOIMATCHVAL(self)
self.zz_fdict['BOIMATCHVAL'] = self.BOIMATCHVAL
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_DSLCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_DSLCTRL, self).__init__(rmio, label,
0xa8004000, 0x0C4,
'DSLCTRL', 'FRC.DSLCTRL', 'read-write',
u"",
0x00000000, 0x7FFFFF7F,
0x00001000, 0x00002000,
0x00003000)
self.DSLMODE = RM_Field_FRC_DSLCTRL_DSLMODE(self)
self.zz_fdict['DSLMODE'] = self.DSLMODE
self.DSLBITORDER = RM_Field_FRC_DSLCTRL_DSLBITORDER(self)
self.zz_fdict['DSLBITORDER'] = self.DSLBITORDER
self.DSLSHIFT = RM_Field_FRC_DSLCTRL_DSLSHIFT(self)
self.zz_fdict['DSLSHIFT'] = self.DSLSHIFT
self.DSLOFFSET = RM_Field_FRC_DSLCTRL_DSLOFFSET(self)
self.zz_fdict['DSLOFFSET'] = self.DSLOFFSET
self.DSLBITS = RM_Field_FRC_DSLCTRL_DSLBITS(self)
self.zz_fdict['DSLBITS'] = self.DSLBITS
self.DSLMINLENGTH = RM_Field_FRC_DSLCTRL_DSLMINLENGTH(self)
self.zz_fdict['DSLMINLENGTH'] = self.DSLMINLENGTH
self.RXSUPRECEPMODE = RM_Field_FRC_DSLCTRL_RXSUPRECEPMODE(self)
self.zz_fdict['RXSUPRECEPMODE'] = self.RXSUPRECEPMODE
self.STORESUP = RM_Field_FRC_DSLCTRL_STORESUP(self)
self.zz_fdict['STORESUP'] = self.STORESUP
self.SUPSHFFACTOR = RM_Field_FRC_DSLCTRL_SUPSHFFACTOR(self)
self.zz_fdict['SUPSHFFACTOR'] = self.SUPSHFFACTOR
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_WCNTCMP4(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_WCNTCMP4, self).__init__(rmio, label,
0xa8004000, 0x0C8,
'WCNTCMP4', 'FRC.WCNTCMP4', 'read-write',
u"",
0x00000000, 0x00000FFF,
0x00001000, 0x00002000,
0x00003000)
self.SUPPLENGTH = RM_Field_FRC_WCNTCMP4_SUPPLENGTH(self)
self.zz_fdict['SUPPLENGTH'] = self.SUPPLENGTH
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUFCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUFCTRL, self).__init__(rmio, label,
0xa8004000, 0x0CC,
'PKTBUFCTRL', 'FRC.PKTBUFCTRL', 'read-write',
u"",
0x00000000, 0x0103FFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUFSTARTLOC = RM_Field_FRC_PKTBUFCTRL_PKTBUFSTARTLOC(self)
self.zz_fdict['PKTBUFSTARTLOC'] = self.PKTBUFSTARTLOC
self.PKTBUFTHRESHOLD = RM_Field_FRC_PKTBUFCTRL_PKTBUFTHRESHOLD(self)
self.zz_fdict['PKTBUFTHRESHOLD'] = self.PKTBUFTHRESHOLD
self.PKTBUFTHRESHOLDEN = RM_Field_FRC_PKTBUFCTRL_PKTBUFTHRESHOLDEN(self)
self.zz_fdict['PKTBUFTHRESHOLDEN'] = self.PKTBUFTHRESHOLDEN
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUFSTATUS(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUFSTATUS, self).__init__(rmio, label,
0xa8004000, 0x0D0,
'PKTBUFSTATUS', 'FRC.PKTBUFSTATUS', 'read-only',
u"",
0x00000000, 0x0000003F,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUFCOUNT = RM_Field_FRC_PKTBUFSTATUS_PKTBUFCOUNT(self)
self.zz_fdict['PKTBUFCOUNT'] = self.PKTBUFCOUNT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF0, self).__init__(rmio, label,
0xa8004000, 0x0D4,
'PKTBUF0', 'FRC.PKTBUF0', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF0 = RM_Field_FRC_PKTBUF0_PKTBUF0(self)
self.zz_fdict['PKTBUF0'] = self.PKTBUF0
self.PKTBUF1 = RM_Field_FRC_PKTBUF0_PKTBUF1(self)
self.zz_fdict['PKTBUF1'] = self.PKTBUF1
self.PKTBUF2 = RM_Field_FRC_PKTBUF0_PKTBUF2(self)
self.zz_fdict['PKTBUF2'] = self.PKTBUF2
self.PKTBUF3 = RM_Field_FRC_PKTBUF0_PKTBUF3(self)
self.zz_fdict['PKTBUF3'] = self.PKTBUF3
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF1(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF1, self).__init__(rmio, label,
0xa8004000, 0x0D8,
'PKTBUF1', 'FRC.PKTBUF1', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF4 = RM_Field_FRC_PKTBUF1_PKTBUF4(self)
self.zz_fdict['PKTBUF4'] = self.PKTBUF4
self.PKTBUF5 = RM_Field_FRC_PKTBUF1_PKTBUF5(self)
self.zz_fdict['PKTBUF5'] = self.PKTBUF5
self.PKTBUF6 = RM_Field_FRC_PKTBUF1_PKTBUF6(self)
self.zz_fdict['PKTBUF6'] = self.PKTBUF6
self.PKTBUF7 = RM_Field_FRC_PKTBUF1_PKTBUF7(self)
self.zz_fdict['PKTBUF7'] = self.PKTBUF7
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF2(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF2, self).__init__(rmio, label,
0xa8004000, 0x0DC,
'PKTBUF2', 'FRC.PKTBUF2', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF8 = RM_Field_FRC_PKTBUF2_PKTBUF8(self)
self.zz_fdict['PKTBUF8'] = self.PKTBUF8
self.PKTBUF9 = RM_Field_FRC_PKTBUF2_PKTBUF9(self)
self.zz_fdict['PKTBUF9'] = self.PKTBUF9
self.PKTBUF10 = RM_Field_FRC_PKTBUF2_PKTBUF10(self)
self.zz_fdict['PKTBUF10'] = self.PKTBUF10
self.PKTBUF11 = RM_Field_FRC_PKTBUF2_PKTBUF11(self)
self.zz_fdict['PKTBUF11'] = self.PKTBUF11
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF3(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF3, self).__init__(rmio, label,
0xa8004000, 0x0E0,
'PKTBUF3', 'FRC.PKTBUF3', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF12 = RM_Field_FRC_PKTBUF3_PKTBUF12(self)
self.zz_fdict['PKTBUF12'] = self.PKTBUF12
self.PKTBUF13 = RM_Field_FRC_PKTBUF3_PKTBUF13(self)
self.zz_fdict['PKTBUF13'] = self.PKTBUF13
self.PKTBUF14 = RM_Field_FRC_PKTBUF3_PKTBUF14(self)
self.zz_fdict['PKTBUF14'] = self.PKTBUF14
self.PKTBUF15 = RM_Field_FRC_PKTBUF3_PKTBUF15(self)
self.zz_fdict['PKTBUF15'] = self.PKTBUF15
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF4(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF4, self).__init__(rmio, label,
0xa8004000, 0x0E4,
'PKTBUF4', 'FRC.PKTBUF4', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF16 = RM_Field_FRC_PKTBUF4_PKTBUF16(self)
self.zz_fdict['PKTBUF16'] = self.PKTBUF16
self.PKTBUF17 = RM_Field_FRC_PKTBUF4_PKTBUF17(self)
self.zz_fdict['PKTBUF17'] = self.PKTBUF17
self.PKTBUF18 = RM_Field_FRC_PKTBUF4_PKTBUF18(self)
self.zz_fdict['PKTBUF18'] = self.PKTBUF18
self.PKTBUF19 = RM_Field_FRC_PKTBUF4_PKTBUF19(self)
self.zz_fdict['PKTBUF19'] = self.PKTBUF19
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF5(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF5, self).__init__(rmio, label,
0xa8004000, 0x0E8,
'PKTBUF5', 'FRC.PKTBUF5', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF20 = RM_Field_FRC_PKTBUF5_PKTBUF20(self)
self.zz_fdict['PKTBUF20'] = self.PKTBUF20
self.PKTBUF21 = RM_Field_FRC_PKTBUF5_PKTBUF21(self)
self.zz_fdict['PKTBUF21'] = self.PKTBUF21
self.PKTBUF22 = RM_Field_FRC_PKTBUF5_PKTBUF22(self)
self.zz_fdict['PKTBUF22'] = self.PKTBUF22
self.PKTBUF23 = RM_Field_FRC_PKTBUF5_PKTBUF23(self)
self.zz_fdict['PKTBUF23'] = self.PKTBUF23
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF6(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF6, self).__init__(rmio, label,
0xa8004000, 0x0EC,
'PKTBUF6', 'FRC.PKTBUF6', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF24 = RM_Field_FRC_PKTBUF6_PKTBUF24(self)
self.zz_fdict['PKTBUF24'] = self.PKTBUF24
self.PKTBUF25 = RM_Field_FRC_PKTBUF6_PKTBUF25(self)
self.zz_fdict['PKTBUF25'] = self.PKTBUF25
self.PKTBUF26 = RM_Field_FRC_PKTBUF6_PKTBUF26(self)
self.zz_fdict['PKTBUF26'] = self.PKTBUF26
self.PKTBUF27 = RM_Field_FRC_PKTBUF6_PKTBUF27(self)
self.zz_fdict['PKTBUF27'] = self.PKTBUF27
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF7(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF7, self).__init__(rmio, label,
0xa8004000, 0x0F0,
'PKTBUF7', 'FRC.PKTBUF7', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF28 = RM_Field_FRC_PKTBUF7_PKTBUF28(self)
self.zz_fdict['PKTBUF28'] = self.PKTBUF28
self.PKTBUF29 = RM_Field_FRC_PKTBUF7_PKTBUF29(self)
self.zz_fdict['PKTBUF29'] = self.PKTBUF29
self.PKTBUF30 = RM_Field_FRC_PKTBUF7_PKTBUF30(self)
self.zz_fdict['PKTBUF30'] = self.PKTBUF30
self.PKTBUF31 = RM_Field_FRC_PKTBUF7_PKTBUF31(self)
self.zz_fdict['PKTBUF31'] = self.PKTBUF31
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF8(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF8, self).__init__(rmio, label,
0xa8004000, 0x0F4,
'PKTBUF8', 'FRC.PKTBUF8', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF32 = RM_Field_FRC_PKTBUF8_PKTBUF32(self)
self.zz_fdict['PKTBUF32'] = self.PKTBUF32
self.PKTBUF33 = RM_Field_FRC_PKTBUF8_PKTBUF33(self)
self.zz_fdict['PKTBUF33'] = self.PKTBUF33
self.PKTBUF34 = RM_Field_FRC_PKTBUF8_PKTBUF34(self)
self.zz_fdict['PKTBUF34'] = self.PKTBUF34
self.PKTBUF35 = RM_Field_FRC_PKTBUF8_PKTBUF35(self)
self.zz_fdict['PKTBUF35'] = self.PKTBUF35
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF9(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF9, self).__init__(rmio, label,
0xa8004000, 0x0F8,
'PKTBUF9', 'FRC.PKTBUF9', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF36 = RM_Field_FRC_PKTBUF9_PKTBUF36(self)
self.zz_fdict['PKTBUF36'] = self.PKTBUF36
self.PKTBUF37 = RM_Field_FRC_PKTBUF9_PKTBUF37(self)
self.zz_fdict['PKTBUF37'] = self.PKTBUF37
self.PKTBUF38 = RM_Field_FRC_PKTBUF9_PKTBUF38(self)
self.zz_fdict['PKTBUF38'] = self.PKTBUF38
self.PKTBUF39 = RM_Field_FRC_PKTBUF9_PKTBUF39(self)
self.zz_fdict['PKTBUF39'] = self.PKTBUF39
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF10(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF10, self).__init__(rmio, label,
0xa8004000, 0x0FC,
'PKTBUF10', 'FRC.PKTBUF10', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF40 = RM_Field_FRC_PKTBUF10_PKTBUF40(self)
self.zz_fdict['PKTBUF40'] = self.PKTBUF40
self.PKTBUF41 = RM_Field_FRC_PKTBUF10_PKTBUF41(self)
self.zz_fdict['PKTBUF41'] = self.PKTBUF41
self.PKTBUF42 = RM_Field_FRC_PKTBUF10_PKTBUF42(self)
self.zz_fdict['PKTBUF42'] = self.PKTBUF42
self.PKTBUF43 = RM_Field_FRC_PKTBUF10_PKTBUF43(self)
self.zz_fdict['PKTBUF43'] = self.PKTBUF43
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PKTBUF11(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PKTBUF11, self).__init__(rmio, label,
0xa8004000, 0x100,
'PKTBUF11', 'FRC.PKTBUF11', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PKTBUF44 = RM_Field_FRC_PKTBUF11_PKTBUF44(self)
self.zz_fdict['PKTBUF44'] = self.PKTBUF44
self.PKTBUF45 = RM_Field_FRC_PKTBUF11_PKTBUF45(self)
self.zz_fdict['PKTBUF45'] = self.PKTBUF45
self.PKTBUF46 = RM_Field_FRC_PKTBUF11_PKTBUF46(self)
self.zz_fdict['PKTBUF46'] = self.PKTBUF46
self.PKTBUF47 = RM_Field_FRC_PKTBUF11_PKTBUF47(self)
self.zz_fdict['PKTBUF47'] = self.PKTBUF47
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_FCD0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_FCD0, self).__init__(rmio, label,
0xa8004000, 0x104,
'FCD0', 'FRC.FCD0', 'read-write',
u"",
0x000000FF, 0x0001FFFF,
0x00001000, 0x00002000,
0x00003000)
self.WORDS = RM_Field_FRC_FCD0_WORDS(self)
self.zz_fdict['WORDS'] = self.WORDS
self.BUFFER = RM_Field_FRC_FCD0_BUFFER(self)
self.zz_fdict['BUFFER'] = self.BUFFER
self.INCLUDECRC = RM_Field_FRC_FCD0_INCLUDECRC(self)
self.zz_fdict['INCLUDECRC'] = self.INCLUDECRC
self.CALCCRC = RM_Field_FRC_FCD0_CALCCRC(self)
self.zz_fdict['CALCCRC'] = self.CALCCRC
self.SKIPCRC = RM_Field_FRC_FCD0_SKIPCRC(self)
self.zz_fdict['SKIPCRC'] = self.SKIPCRC
self.SKIPWHITE = RM_Field_FRC_FCD0_SKIPWHITE(self)
self.zz_fdict['SKIPWHITE'] = self.SKIPWHITE
self.ADDTRAILTXDATA = RM_Field_FRC_FCD0_ADDTRAILTXDATA(self)
self.zz_fdict['ADDTRAILTXDATA'] = self.ADDTRAILTXDATA
self.EXCLUDESUBFRAMEWCNT = RM_Field_FRC_FCD0_EXCLUDESUBFRAMEWCNT(self)
self.zz_fdict['EXCLUDESUBFRAMEWCNT'] = self.EXCLUDESUBFRAMEWCNT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_FCD1(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_FCD1, self).__init__(rmio, label,
0xa8004000, 0x108,
'FCD1', 'FRC.FCD1', 'read-write',
u"",
0x000000FF, 0x0001FFFF,
0x00001000, 0x00002000,
0x00003000)
self.WORDS = RM_Field_FRC_FCD1_WORDS(self)
self.zz_fdict['WORDS'] = self.WORDS
self.BUFFER = RM_Field_FRC_FCD1_BUFFER(self)
self.zz_fdict['BUFFER'] = self.BUFFER
self.INCLUDECRC = RM_Field_FRC_FCD1_INCLUDECRC(self)
self.zz_fdict['INCLUDECRC'] = self.INCLUDECRC
self.CALCCRC = RM_Field_FRC_FCD1_CALCCRC(self)
self.zz_fdict['CALCCRC'] = self.CALCCRC
self.SKIPCRC = RM_Field_FRC_FCD1_SKIPCRC(self)
self.zz_fdict['SKIPCRC'] = self.SKIPCRC
self.SKIPWHITE = RM_Field_FRC_FCD1_SKIPWHITE(self)
self.zz_fdict['SKIPWHITE'] = self.SKIPWHITE
self.ADDTRAILTXDATA = RM_Field_FRC_FCD1_ADDTRAILTXDATA(self)
self.zz_fdict['ADDTRAILTXDATA'] = self.ADDTRAILTXDATA
self.EXCLUDESUBFRAMEWCNT = RM_Field_FRC_FCD1_EXCLUDESUBFRAMEWCNT(self)
self.zz_fdict['EXCLUDESUBFRAMEWCNT'] = self.EXCLUDESUBFRAMEWCNT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_FCD2(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_FCD2, self).__init__(rmio, label,
0xa8004000, 0x10C,
'FCD2', 'FRC.FCD2', 'read-write',
u"",
0x000000FF, 0x0001FFFF,
0x00001000, 0x00002000,
0x00003000)
self.WORDS = RM_Field_FRC_FCD2_WORDS(self)
self.zz_fdict['WORDS'] = self.WORDS
self.BUFFER = RM_Field_FRC_FCD2_BUFFER(self)
self.zz_fdict['BUFFER'] = self.BUFFER
self.INCLUDECRC = RM_Field_FRC_FCD2_INCLUDECRC(self)
self.zz_fdict['INCLUDECRC'] = self.INCLUDECRC
self.CALCCRC = RM_Field_FRC_FCD2_CALCCRC(self)
self.zz_fdict['CALCCRC'] = self.CALCCRC
self.SKIPCRC = RM_Field_FRC_FCD2_SKIPCRC(self)
self.zz_fdict['SKIPCRC'] = self.SKIPCRC
self.SKIPWHITE = RM_Field_FRC_FCD2_SKIPWHITE(self)
self.zz_fdict['SKIPWHITE'] = self.SKIPWHITE
self.ADDTRAILTXDATA = RM_Field_FRC_FCD2_ADDTRAILTXDATA(self)
self.zz_fdict['ADDTRAILTXDATA'] = self.ADDTRAILTXDATA
self.EXCLUDESUBFRAMEWCNT = RM_Field_FRC_FCD2_EXCLUDESUBFRAMEWCNT(self)
self.zz_fdict['EXCLUDESUBFRAMEWCNT'] = self.EXCLUDESUBFRAMEWCNT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_FCD3(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_FCD3, self).__init__(rmio, label,
0xa8004000, 0x110,
'FCD3', 'FRC.FCD3', 'read-write',
u"",
0x000000FF, 0x0001FFFF,
0x00001000, 0x00002000,
0x00003000)
self.WORDS = RM_Field_FRC_FCD3_WORDS(self)
self.zz_fdict['WORDS'] = self.WORDS
self.BUFFER = RM_Field_FRC_FCD3_BUFFER(self)
self.zz_fdict['BUFFER'] = self.BUFFER
self.INCLUDECRC = RM_Field_FRC_FCD3_INCLUDECRC(self)
self.zz_fdict['INCLUDECRC'] = self.INCLUDECRC
self.CALCCRC = RM_Field_FRC_FCD3_CALCCRC(self)
self.zz_fdict['CALCCRC'] = self.CALCCRC
self.SKIPCRC = RM_Field_FRC_FCD3_SKIPCRC(self)
str(iend)
end_seq = pysam.faidx(genome, end_coord)
if start_seq.split('\n')[1].upper() == 'CT': # 3' splice site AG on minus strand
count_minus += 1
intron_label_list.append('-')
elif start_seq.split('\n')[1].upper() == 'GT': # 5' splice site GT on plus strand
count_plus += 1
intron_label_list.append('+')
else:
count_unknown += 1
intron_label_list.append('?')
if end_seq.split('\n')[1].upper() == 'AC': # 5' splice site GT on minus strand
count_minus += 1
intron_label_list.append('-')
elif end_seq.split('\n')[1].upper() == 'AG': # 3' splice site AG on plus strand
count_plus += 1
intron_label_list.append('+')
else:
count_unknown += 1
intron_label_list.append('?')
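# Majority vote over all junction dinucleotides: a tie between plus and minus,
# or a majority of unrecognized motifs, leaves the strand as 'NA'.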
if max(count_minus, count_plus, count_unknown) == count_unknown:
strand_inferred = 'NA'
elif count_minus == count_plus:
strand_inferred = 'NA'
elif max(count_minus, count_plus, count_unknown) == count_minus:
strand_inferred = '-'
elif max(count_minus, count_plus, count_unknown) == count_plus:
strand_inferred = '+'
if verbose:
logger.info('inferred strand: {}', strand_inferred)
logger.info('counts: minus {} plus {} unknown {}', count_minus, count_plus, count_unknown)
return strand_inferred
def run(
input_bg, input_bg_minus, input_regions, output,
genome, plot, junc, minjxncount, juncdist,
min_length, test_thresh,
min_expn, min_expn_distal,
winsize, peak_thresh, peak_min_dist,
fcthresh, max_end_ru, verbose):
# --------------------------------------------------
# main routine
# --------------------------------------------------
logger.info('job starting: {}', str(datetime.now().time()))
if not input_bg:
logger.error('EXIT: Please provide --input_bg')
sys.exit(1)
if not input_regions:
logger.error('EXIT: Please provide --input_regions')
sys.exit(1)
if not output:
logger.error('EXIT: Please provide --output')
sys.exit(1)
if not input_bg_minus and not genome:
logger.error('EXIT: for non-strand-specific RNA-Seq, please provide a --genome file')
sys.exit(1)
plot_dir = os.path.splitext(output)[0] + '_plots'
if plot:
if os.path.exists(plot_dir):
for filename in os.listdir(plot_dir):
os.remove(os.path.join(plot_dir, filename))
else:
os.mkdir(plot_dir)
# === get bedgraph per gene ===
logger.info('bedtools intersect {}', str(datetime.now().time()))
bgfile = os.path.splitext(output)[0] + '_gene_bedgraph_intersect.txt'
if input_bg_minus: # strand-specific
bedgraph_per_gene_ss(input_regions, input_bg, input_bg_minus, bgfile)
else: # non-strand-specific
bedgraph_per_gene_nss(input_regions, input_bg, bgfile)
# === get introns from junction counts ===
logger.info('getting junction reads {}', str(datetime.now().time()))
intron2jxnCount = OrderedDict()
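# Map each intron (chrom:start:end[:strand]) to its junction read count,
# keeping only junctions with at least minjxncount supporting reads.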
f = open(junc, 'r')
for line in f:
if not line.startswith('track'):
x = line.rstrip().split('\t')
if len(x) == 6:
(chrom, start, end, name, count, strand) = x
intron = ':'.join([chrom, start, end, strand])
elif len(x) == 5:
(chrom, start, end, name, count) = x
intron = ':'.join([chrom, start, end])
else:
logger.error('EXIT: did not recognize junction file format\n')
sys.exit(1)
if int(count) >= minjxncount:
if intron not in intron2jxnCount:
intron2jxnCount[intron] = count
else:
logger.error('EXIT: intron seen more than once in junction file')
sys.exit(1)
f.close()
# === find change points in each gene ===
count_genes_with_reads = 0
count_min_length_filter = 0
count_min_length_keep = 0
count_min_expn_filter = 0
count_min_expn_keep = 0
count_high_ksp = 0
count_vert_sum_max0 = 0
count_vert2_sum_max0 = 0
count_no_cps_afterfiltering = 0
count_no_cps_ttest0 = 0
count_cps_called = 0
count_filter123 = 0
count_genes_with_reads_annotated = 0
count_min_length_filter_annotated = 0
count_min_expn_filter_annotated = 0
logger.info('finding change points in each gene {}', str(datetime.now().time()))
maxl = 0
with open(bgfile, 'r') as f:
for l, line in enumerate(f):
maxl = l
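# First pass: record the index of the last line (maxl) so the final gene can be
# flushed when it is reached in the main loop below.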
o = open(output, 'w')
with open(bgfile, 'r') as f:
for l, line in enumerate(f):
# not EOF -> read the line
if line != '':
x = line.rstrip().split('\t')
if len(x) == 11:
(achrom, astart, aend, ageneid, ascore, astrand, bchrom, bstart, bend, bcov, overlap_len) = x
elif len(x) == 9:
(achrom, astart, aend, ageneid, bchrom, bstart, bend, bcov, overlap_len) = x
elif len(x) == 10:
(achrom, astart, aend, ageneid, ascore, bchrom, bstart, bend, bcov, overlap_len) = x
else:
logger.error('EXIT: do not recognize bedgraph intersect format\n')
logger.error(line)
sys.exit(1)
if not input_bg_minus:
astrand = 0
astart = int(astart)
aend = int(aend)
bstart = int(bstart)
bend = int(bend)
bcov = float(bcov)
else:
x = ''
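# Walk the gene-sorted intersect file: accumulate per-base coverage while
# consecutive lines belong to the same gene, and flush the accumulated gene
# (calling change points on it) whenever a new gene or the last line is reached.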
if l == 0: # first line
prev_gene = ':'.join(x[:5]) if astrand == 0 else ':'.join(x[:6])
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array = np.zeros(aend - astart)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
# === next round ===
prev_astart = astart
prev_aend = aend
prev_geneid = ':'.join(map(str, [ageneid, astart, aend, achrom, astrand]))
prev_chrom = achrom
prev_strand = astrand
if l == maxl:
cov_sum = sum(prev_cov_array)
new_start = np.nonzero(prev_cov_array)[0][0]
new_end = np.nonzero(prev_cov_array)[0][-1]
prev_cov_array = prev_cov_array[new_start:(new_end + 1)]
geneid = prev_geneid
start = prev_astart
end = prev_aend
chrom = prev_chrom
strand = prev_strand
else:
this_gene = ':'.join(x[:5]) if astrand == 0 else ':'.join(x[:6])
if line == '' and this_gene == prev_gene and this_gene == '': # EOF
break
elif this_gene == prev_gene and l != maxl: # get coverage
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
# === next round ===
prev_astart = astart
prev_aend = aend
prev_geneid = ':'.join(map(str, [ageneid, astart, aend, achrom, astrand]))
prev_chrom = achrom
prev_strand = astrand
else: # finished reading all info for one gene -> call change points
# === get per-base coverage ===
if l == maxl:
# === last line of this gene & last line of the file ===
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
cov_sum = sum(prev_cov_array)
new_start = np.nonzero(prev_cov_array)[0][0]
new_end = np.nonzero(prev_cov_array)[0][-1]
prev_cov_array = prev_cov_array[new_start:(new_end + 1)]
# === call change points in this gene ===
geneid = prev_geneid
start = prev_astart
end = prev_aend
chrom = prev_chrom
strand = prev_strand
cov_array = prev_cov_array
new_start = new_start + start
new_end = new_end + start + 1
else:
cov_sum = sum(prev_cov_array)
new_start = np.nonzero(prev_cov_array)[0][0]
new_end = np.nonzero(prev_cov_array)[0][-1]
prev_cov_array = prev_cov_array[new_start:(new_end + 1)]
# === call change points in the previous gene ===
geneid = prev_geneid
start = prev_astart
end = prev_aend
chrom = prev_chrom
strand = prev_strand
cov_array = prev_cov_array
new_start = new_start + start
new_end = new_end + start + 1
# === next round: first line of the next gene ===
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array = np.zeros(aend - astart)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
prev_astart = astart
prev_aend = aend
prev_geneid = ':'.join(map(str, [ageneid, astart, aend, achrom, astrand]))
prev_chrom = achrom
prev_strand = astrand
prev_gene = this_gene
count_genes_with_reads += 1
if 'novel' not in geneid:
count_genes_with_reads_annotated += 1
if verbose:
logger.info('gene: {} {}:{}-{}:{} {}-{} {} {}', geneid, chrom, start, end, strand, new_start, new_end, \
cov_array.size, str(datetime.now().time()))
if cov_array.size >= min_length:
count_min_length_keep += 1
# === get per-base coverage ===
cov_avg = float(cov_sum) / float(cov_array.size)
# === get introns ===
intron_list = []
exon_list = []
jxn_list = []
jxn_list, exon_list, intron_list = get_introns_exons(intron2jxnCount, cov_array.size, chrom, new_start, new_end, strand)
if len(jxn_list) > 0:
cov_avg_exon_with_utr, max_exon_cov = get_exon_cov(exon_list, cov_array)
if strand != 0:
strand_inferred = strand
elif strand == 0:
# === infer strand ===
strand_inferred = infer_strand(intron_list, chrom, genome, verbose)
else:
cov_avg_exon_with_utr = cov_avg
max_exon_cov = cov_avg
strand_inferred = strand if strand != 0 else 'NA'
if verbose:
logger.debug('junctions: {}', jxn_list)
logger.debug('exons: {}', exon_list)
logger.debug('strand: {}', strand_inferred)
logger.debug('{} {}', cov_avg_exon_with_utr, max_exon_cov)
if min_expn != -1:
if cov_avg_exon_with_utr < min_expn:
count_min_expn_filter += 1
if 'novel' not in geneid:
count_min_expn_filter_annotated += 1
if verbose:
logger.debug('did not meet min expression level: {} {}', min_expn, cov_avg_exon_with_utr)
continue
else:
count_min_expn_keep += 1
# === calculate cumulative vertical distance per gene ===
vert_sum_array, vert_array = crs(cov_array)
if vert_sum_array[0] == 'NA':
count_vert_sum_max0 += 1
continue
# === KS test ===
ksp = ks_test(vert_sum_array, plot, os.path.join(plot_dir, geneid))
peak_inds_ttest_opt = []
if ksp < test_thresh:
vert2_array = vert_array * vert_array / max(vert_array)
vert2_vert_sum_array, vert2_vert_array = crs(vert2_array)
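# vert2_array squares the per-base vertical distances (normalized by their max),
# which emphasizes large changes over small fluctuations in the recomputed
# cumulative series.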
if vert2_vert_sum_array[0] == 'NA':
count_vert2_sum_max0 += 1
continue
# === get parameters ===
if winsize != -1:
denoise_winsize_list = [winsize]
else:
winsize_max = max(100, int(round(vert_sum_array.size / 100, -2)))
denoise_winsize_list = []
winsize_min = 100
while winsize_min <= min(winsize_max, 500):
denoise_winsize_list.append(winsize_min)
winsize_min = winsize_min + 100
denoise_winsize_list = sorted(denoise_winsize_list)
if verbose:
logger.debug('de-noising window sizes: {}', denoise_winsize_list)
amp_thresh_list = [peak_thresh] if peak_thresh != -1.0 else [0.05, 0.1, 0.15]
if verbose:
logger.debug('amplitude thresholds: {}', amp_thresh_list)
peak_min_dist_list = [peak_min_dist] if peak_min_dist != -1 else [10, 50]
if verbose:
logger.debug('min distance between peaks: {}', peak_min_dist_list)
h = 0
njxns_detected = -100
other_detected = 100
param2cp = {}
param2cpopt = {}
param2fcopt = {}
denoise_winsize_opt = 0
amp_thresh_opt = 0
peak_min_dist_opt = 0
peak_inds_ttest = []
peak_inds_ttest_with_ends = []
for denoise_winsize in denoise_winsize_list:
# get distance to line & denoise
if verbose:
logger.debug ('de-noising {} {}', denoise_winsize, str(datetime.now().time()))
line_dist_array2, line_dist_array2_denoise = get_linedist(vert2_vert_sum_array, denoise_winsize)
line_dist_array, line_dist_array_denoise = get_linedist(vert_sum_array, denoise_winsize)
a2totcp = {}
for a, amp_thresh in enumerate(amp_thresh_list):
if a > 0 and a2totcp[a - 1] == 0:
if verbose:
logger.debug('skipping higher amplitude thresholds because previous gave 0 change points')
a2totcp[a] = 0
else:
for peak_min_dist in peak_min_dist_list:
# --------------------------------------------------
# peak calling
# --------------------------------------------------
if verbose:
logger.debug('parameters: window size {} amplitude {} min distance {} {}', denoise_winsize, amp_thresh, peak_min_dist, str(datetime.now().time()))
peak_inds_ip_combined_filtered = []
peak_inds_ip_combined = []
peak_inds2_ip_combined = []
if np.unique(line_dist_array2_denoise).size != 1:
# === crs^2 ===
# call peaks
peak_inds2 = peakutils.peak.indexes(line_dist_array2_denoise, thres=amp_thresh, min_dist=peak_min_dist)
peak_inds_min2 = peakutils.peak.indexes(-1 * line_dist_array2_denoise, thres=amp_thresh, min_dist=peak_min_dist)
# increase precision in case de-noising smoothed too much: call peaks on the real data just in the window around the peaks called on smooth data
peak_inds2_ip = increase_precision(peak_inds2, denoise_winsize, line_dist_array2, 'max', amp_thresh)
peak_inds2_min_ip = increase_precision(peak_inds_min2, denoise_winsize, line_dist_array2, 'min', amp_thresh)
# combine
peak_inds2_ip_combined = np.sort(np.append(peak_inds2_ip, peak_inds2_min_ip))
if verbose:
logger.debug ('peak inds max: {} {}', len(peak_inds2), str(datetime.now().time()))
logger.debug ('peak inds min: {} {}', len(peak_inds_min2), str(datetime.now().time()))
logger.debug ('increased precision max: {} {}', len(peak_inds2_ip), str(datetime.now().time()))
logger.debug ('increased precision min: {} {}', len(peak_inds2_min_ip), str(datetime.now().time()))
logger.debug ('peak inds crs^2: {} {}', len(peak_inds2_ip_combined), str(datetime.now().time()))
if np.unique(line_dist_array_denoise).size != 1:
# === original crs ===
# call peaks
peak_inds = peakutils.peak.indexes(line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
peak_inds_min = peakutils.peak.indexes(-1 * line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
# increase precision in case de-noising smoothed too much: call peaks on the real data just in the window around the peaks called on smooth data
peak_inds_ip = increase_precision(peak_inds, denoise_winsize, line_dist_array, 'max', amp_thresh)
peak_inds_min_ip = increase_precision(peak_inds_min, denoise_winsize, line_dist_array, 'min', amp_thresh)
# combine
peak_inds_ip_combined = np.sort(np.append(peak_inds_ip, peak_inds_min_ip))
if verbose:
logger.debug ('peak inds max: {} {}', len(peak_inds), str(datetime.now().time()))
logger.debug ('peak inds min: {} {}', len(peak_inds_min), str(datetime.now().time()))
logger.debug ('increased precision max: {} {}', len(peak_inds_ip), str(datetime.now().time()))
logger.debug ('increased precision min: {} {}', len(peak_inds_min_ip), str(datetime.now().time()))
logger.debug ('peak inds crs: {} {}', len(peak_inds_ip_combined), str(datetime.now().time()))
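# Combine the two peak sets: keep every crs^2 peak, plus any crs peak lying
# outside the crs^2 peak range (upstream of the first or downstream of the last).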
if np.unique(line_dist_array2_denoise).size != 1:
if peak_inds_ip_combined.size != 0 and peak_inds2_ip_combined.size != 0:
peak_inds_ip_combined_filtered = np.append(peak_inds2_ip_combined, peak_inds_ip_combined[peak_inds_ip_combined < min(peak_inds2_ip_combined)])
peak_inds_ip_combined_filtered = np.unique(np.append(peak_inds_ip_combined_filtered, peak_inds_ip_combined[peak_inds_ip_combined > max(peak_inds2_ip_combined)]))
else:
peak_inds_ip_combined_filtered = np.unique(np.append(peak_inds2_ip_combined, peak_inds_ip_combined))
else:
peak_inds_ip_combined_filtered = np.copy(peak_inds_ip_combined)
# === UTRs: local crs ===
if len(jxn_list) > 0:
temp_cov_array = cov_array[:jxn_list[0]]
if len(temp_cov_array) > 0:
temp_vert_sum_array, temp_vert_array = crs(temp_cov_array)
if temp_vert_sum_array[0] != 'NA' and len(temp_vert_sum_array) > 0 and max(temp_vert_sum_array) > 0:
temp_ksp = ks_test(temp_vert_sum_array, 0, os.path.join(plot_dir, geneid) +
import numpy as np
from scipy.interpolate import lagrange
import matplotlib.pyplot as plt
import random as rd
import tqdm.auto as tqn
from numba import njit
##pylint
@njit
def solve_hydro(K, n, h, dH, len_t_mesure, t_mesure, dz, nb_cel, alpha=0.7):
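# Alpha-weighted (theta-scheme) implicit/explicit finite-difference solve of the
# 1D head diffusion equation: A holds the implicit part, B the explicit part,
# with prescribed head dH[j] at the top boundary and 0 at the bottom, and
# Ss = n / h used as the specific storage. Returns the per-cell vertical head
# gradient for every time step.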
Ss = n / h
H_array = np.zeros((len_t_mesure, nb_cel), dtype=np.float64)
H_array[0] = np.linspace(dH[0], 0, nb_cel).astype(np.float64)
for j in range(1, len_t_mesure):
dt = t_mesure[j - 1]
A = np.zeros((nb_cel, nb_cel), dtype=np.float64)
B = np.zeros((nb_cel, nb_cel), dtype=np.float64)
A[0][0] = 1
A[1][0] = 8 * alpha * K / (3 * ((dz) ** 2)) # boundary term
A[1][1] = -alpha * K * 4 / ((dz) ** 2) - Ss / dt
A[1][2] = 4 * alpha * K / (3 * ((dz) ** 2))
A[-1][nb_cel - 1] = 1
A[nb_cel - 2][nb_cel - 3] = (alpha) * K * 4 / (3 * (dz) ** 2)
A[nb_cel - 2][nb_cel - 2] = -(alpha) * K * 4 / ((dz) ** 2) - Ss / dt
A[nb_cel - 2][nb_cel - 1] = 8 * (alpha) * K / (3 * (dz) ** 2)
B[1][0] = -2 * (1 - alpha) * K * 4 / (3 * (dz) ** 2)
B[1][1] = (1 - alpha) * K * 4 / (dz) ** 2 - Ss / dt
B[1][2] = -(1 - alpha) * K * 4 / (3 * (dz) ** 2)
B[nb_cel - 2][nb_cel - 3] = -(1 - alpha) * K * 4 / (3 * (dz) ** 2)
B[nb_cel - 2][nb_cel - 2] = (1 - alpha) * K * 4 / (dz) ** 2 - Ss / dt
B[nb_cel - 2][nb_cel - 1] = -8 * (1 - alpha) * K / (3 * (dz) ** 2)
for i in range(2, nb_cel - 2):
A[i][i - 1] = K * alpha / dz ** 2
A[i][i] = -(2 * K * alpha / dz ** 2) - Ss / dt
A[i][i + 1] = K * alpha / dz ** 2
B[i][i - 1] = -K * (1 - alpha) / dz ** 2
B[i][i] = (2 * K * (1 - alpha) / dz ** 2) - Ss / dt
B[i][i + 1] = -K * (1 - alpha) / dz ** 2
C = np.dot(B, H_array[j - 1])
C[0], C[nb_cel - 1] = dH[j], 0
res = np.linalg.solve(A, C)
H_array[j] = res
delta_H = np.zeros((len_t_mesure, nb_cel - 1), dtype=np.float64)
for j in range(len_t_mesure):
for p in range(len(H_array[j]) - 1):
delta_H[j][p] = (H_array[j][p + 1] - H_array[j][p]) / dz
return np.asarray(delta_H)
@njit
def solve_thermique(
K,
lbds,
n,
pscs,
h,
len_temp,
t_mesure,
nb_cel,
grad_h,
rho_w,
c_w,
profondeur_init,
dH,
T_mesure,
alpha=0.7,
):
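# Heat-transport analogue of solve_hydro: alpha-weighted implicit/explicit solve
# of a 1D advection-diffusion equation, with ke the effective thermal diffusivity
# and ae the advective coefficient (defined below). Note that the parameter named
# dH here carries the top-boundary temperature series (T_up_use at the call site
# in Column.solve_transi), while T_mesure supplies the bottom-boundary temperature.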
dz = h / nb_cel
lbdm = (n * np.sqrt(0.6) + (1 - n) * np.sqrt(lbds)) ** 2
pmcm = n * rho_w * c_w + (1 - n) * pscs
list_temp = np.zeros((len_temp, nb_cel), dtype=np.float64)
list_temp[0] = profondeur_init.astype(np.float64)
ke = lbdm / pmcm ##lbm/pmcm
ae = K * c_w * rho_w / pmcm # K *pwcw/pmcm
for j in range(1, len_temp):
dt = t_mesure[j - 1]
delta_H = grad_h[j]
A = np.zeros((nb_cel, nb_cel), dtype=np.float64)
B = np.zeros((nb_cel, nb_cel), dtype=np.float64)
A[0][0] = 1
A[nb_cel - 1][nb_cel - 1] = 1
A[1][0] = alpha * (2 * ke / dz ** 2 - ae * delta_H[1] / (2 * dz))
A[1][1] = alpha * (-2 * ke / dz ** 2) * (3 / 2) - 1 / dt
A[1][2] = alpha * (ke / dz ** 2 + ae * delta_H[1] / (2 * dz))
A[nb_cel - 2][nb_cel - 3] = alpha * (
ke / dz ** 2 + ae * delta_H[nb_cel - 2] / (2 * dz)
)
A[nb_cel - 2][nb_cel - 2] = alpha * (-2 * ke / dz ** 2) * (3 / 2) - 1 / dt
A[nb_cel - 2][nb_cel - 1] = alpha * (
2 * ke / dz ** 2 - ae * delta_H[nb_cel - 2] / (2 * dz)
)
B[0][0] = 1
B[nb_cel - 1][nb_cel - 1] = 1
B[1][0] = -(1 - alpha) * (2 * ke / dz ** 2 - ae * delta_H[1] / (2 * dz))
B[1][1] = -(1 - alpha) * (-2 * ke / dz ** 2) * (3 / 2) - 1 / dt
B[1][2] = -(1 - alpha) * (ke / dz ** 2 + ae * delta_H[1] / (2 * dz))
B[nb_cel - 2][nb_cel - 3] = -(1 - alpha) * (
ke / dz ** 2 + ae * delta_H[nb_cel - 2] / (2 * dz)
)
B[nb_cel - 2][nb_cel - 2] = (
-(1 - alpha) * (-2 * ke / dz ** 2) * (3 / 2) - 1 / dt
)
B[nb_cel - 2][nb_cel - 1] = -(1 - alpha) * (
2 * ke / dz ** 2 - ae * delta_H[nb_cel - 2] / (2 * dz)
)
for i in range(2, nb_cel - 2):
A[i][i - 1] = alpha * (ke / dz ** 2 - ae * delta_H[i] / (2 * dz))
A[i][i] = alpha * (-2 * ke / dz ** 2) - 1 / dt
A[i][i + 1] = alpha * (ke / dz ** 2 + ae * delta_H[i] / (2 * dz))
B[i][i - 1] = -(1 - alpha) * (ke / dz ** 2 - ae * delta_H[i] / (2 * dz))
B[i][i] = -(1 - alpha) * (-2 * ke / dz ** 2) - 1 / dt
B[i][i + 1] = -(1 - alpha) * (ke / dz ** 2 + ae * delta_H[i] / (2 * dz))
C = np.dot(B, list_temp[j - 1])
C[0], C[nb_cel - 1] = dH[j], T_mesure[j][-1]
res = np.linalg.solve(A, C)
list_temp[j] = res
return np.asarray(list_temp)
class Column:
@classmethod
def from_dict(cls, col_dict):
return cls(**col_dict)
def __init__(
self,
river_bed,
offset,
depth_sensors,
dH_measures,
T_measures,
sigma_meas_P,
sigma_meas_T,
):
self._dH = dH_measures
self._T_mesure = T_measures
self._h = depth_sensors[-1]
self._profondeur_mesure = depth_sensors
self._dh = offset
self._sigma_p = sigma_meas_P
self._sigma_temp = sigma_meas_T
self._rho_w = 1000
self._c_w = 4180
self._t_mesure = [i[0] for i in self._dH]
self._T_mesure_int = [i[1][0:3] for i in T_measures]
self.grad_H = []
self.res_T = []
self.debit = []
self.distrib_a_posteriori = None
self.energie = None
self.moy_acceptation = None
self.run_mcmc = False
self.profil_temp_quantile = None
self.param_quantile = None
self.advec_flows = None
self.conduc_flows = None
self.debit_quantile = None
self.flux_adv_quantile = None
self.flux_cond_quantile = None
def solve_transi(self, param: dict, alpha=0.7):
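# Forward model: run the hydraulic solve first, then feed its head gradient into
# the heat-transport solve. Also derives the Darcy flux (debit) and the advective
# and conductive heat fluxes from the resulting fields.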
K = 10 ** (-param["moinslog10K"])
lbds = param["lambda_s"]
n = param["n"]
pscs = param["rhos_cs"]
nb_cel = param["nb_cel"]
lbdm = (n * np.sqrt(0.6) + (1 - n) * np.sqrt(lbds)) ** 2
array_times = np.zeros((len(self._t_mesure)), dtype=np.float64)
for j in range(1, len(self._t_mesure)):
a = float((self._t_mesure[j] - self._t_mesure[j - 1]).total_seconds())
array_times[j - 1] = a
H_use = np.array([i[1][0] for i in self._dH])
T_up_use = np.array([i[1][1] for i in self._dH])
len_time = len(self._t_mesure)
delta_H = solve_hydro(
K, n, self._h, H_use, len_time, array_times, self._h / float(nb_cel), nb_cel
)
self.grad_H.append(np.asarray(delta_H))
self.debit = [-K * i[0] for i in self.grad_H[-1]]
Temp_use = np.asarray([i[1] for i in self._T_mesure], dtype=np.float64)
coef = lagrange(
[0] + self._profondeur_mesure, [self._dH[0][1][1]] + self._T_mesure[0][1]
)
profondeur = np.linspace(0, self._profondeur_mesure[-1], nb_cel)
profondeur_inter = np.asarray(coef(profondeur), dtype=np.float64)
res_temp = solve_thermique(
K, lbds, n, pscs, self._h, len_time, array_times, nb_cel, delta_H,
self._rho_w, self._c_w, profondeur_inter, T_up_use, Temp_use,
)
dz = self._h / nb_cel
self.advec_flows = self._rho_w * self._c_w * delta_H * res_temp[:, :-1]
self.conduc_flows = lbdm * np.gradient(
res_temp, np.linspace(0, self._h, nb_cel), axis=-1
)
self.res_T.append(res_temp)
return res_temp, delta_H
def mcmc(self, priors: dict, nb_iter: int, nb_cel: int, quantile):
self.run_mcmc = True
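# Likelihood helpers for the MCMC sampler: pi is the unnormalized Gaussian
# likelihood of the measured temperatures given the simulated ones, and
# compute_energy is the corresponding negative log-likelihood (up to a constant),
# i.e. energy = -log(pi).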
def pi(T_mesure, T_calcul, sigma_obs, norm_init=1):
T_mesure = np.array(T_mesure)
T_calcul = np.array(T_calcul).transpose()
return np.exp(
(-0.5 / (sigma_obs ** 2)) * np.linalg.norm(T_mesure - T_calcul) ** 2
)
def compute_energy(T_mesure, T_calcul, sigma_obs):
T_mesure = np.array(T_mesure)
T_calcul = np.array(T_calcul).transpose()
return (0.5 / (sigma_obs ** 2)) * np.linalg.norm(T_mesure - T_calcul) ** 2
def perturbation(borne_inf, borne_sup, previous_value, sigma):
new_value = np.random.normal(previous_value, sigma)
while new_value - borne_sup >
"""
Topology Graph
==============
"""
from functools import partial
import numpy as np
from stk.utilities import flatten
from ..construction_result import ConstructionResult
from ..construction_state import ConstructionState
from ..edge_group import EdgeGroup
from .implementations import _Parallel, _Serial
class TopologyGraph:
"""
An abstract base class for topology graphs.
It is responsible for the construction of molecules. To create
a new topology graph, you want to subclass and implement this
abstract base class.
Notes
-----
*Adding New Topology Graphs*
You might notice that some of the methods of this abstract base
class are implemented. This is purely for convenience when
implementing subclasses. The implemented public methods are
simply default implementations, which can safely be ignored or
overridden, when implementing subclasses. Any private methods are
implementation details of these default implementations.
Many classes, such as :class:`.Vertex`, :class:`.Edge`,
:class:`.EdgeGroup` and :class:`.ConstructionState`, exist as
implementation details of this default :class:`.TopologyGraph`
implementation. You could ignore all of them, and define a new
:meth:`.construct` method from scratch. In fact, your topology
graph does not have to be represented as a graph at all. However,
using the default implementation of :class:`.TopologyGraph` makes
it significantly easier to implement a construction process. When
using the default implementation of :class:`.TopologyGraph`, you
mostly just need to implement a :class:`.Vertex` subclass, which
is much easier than figuring out the whole construction process
from scratch. In addition, you get benefits like parallel
construction for free, as it is included in the default
implementation.
Typically, adding a new topology graph will involve implementing
any pure virtual methods of :class:`.TopologyGraph`, in a new
subclass, as well as implementing any pure virtual methods of
:class:`.Vertex`, again in a new subclass. Combined, this is just a
handful of simple methods to implement. Sometimes, rarely, you
might also want to subclass :class:`.ConstructionState`, when you
want to add additional hooks during construction, by extending
the methods of this class. If you do this, make sure
to override :meth:`._get_construction_state` to return your
subclass of :class:`.ConstructionState`, rather than the base
class, as is done by default. You can subclass and extend the
methods of any class as you wish, but it would be unusual if this
doesn't cover all your requirements.
*The Default Implementation*
The default implementation of :class:`.TopologyGraph` represents
the constructed molecule through a graph. The vertices indicate
where building blocks are placed and the edges indicate which
building blocks have bonds formed between them by the construction
process.
:class:`.Vertex` instances are responsible for placing the building
block molecules. By initializing the vertices with different
parameters, you can alter how they position the building block
molecules, and therefore allow the user to easily specify a
different structural isomer.
Once a building block is placed on a vertex, the functional groups
on the building block must be mapped to the different edges
connected to the vertex. The number of functional groups in the
building block must match the number of edges connected to the
vertex.
Once the functional groups are mapped to edges, the edges are
used to perform reactions on the building blocks. Edges are
grouped in an :class:`.EdgeGroup`, and all functional groups
present in the edge group are reacted together. Normally, unless
you are doing something very exotic, an :class:`.EdgeGroup` will
hold just one :class:`.Edge`, and the two functional groups on
that edge will be reacted together through a single
:class:`.Reaction`. This reaction will normally add the bonds which
are required to form the joined-up constructed molecule, but note
that it does not have to add any bonds at all. In addition, a
:class:`.Reaction` can add and remove atoms from the constructed
molecule. Which reaction is selected to join the functional groups
depends on the :class:`.ReactionFactory` given to the
:class:`.TopologyGraph` during initialization.
Once this is done, you have a :class:`.ConstructedMolecule`.
Examples
--------
*Subclass Implementation*
The source code of subclasses, listed in
:mod:`~.topology_graph.topology_graph.topology_graph`, can serve
as good examples.
*Changing the Building Blocks of a Topology Graph*
To change the building blocks used by a topology graph you
can use :meth:`.with_building_blocks` to get a clone of the
topology graph holding the new building blocks
.. testcode:: changing-the-building-blocks-of-a-topology-graph
import stk
bb1 = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
bb2 = stk.BuildingBlock('BrCCCBr', [stk.BromoFactory()])
linear = stk.polymer.Linear(
building_blocks=(bb1, bb2),
repeating_unit='A',
num_repeating_units=15,
)
bb3 = stk.BuildingBlock('BrCNCBr', [stk.BromoFactory()])
# All bb1 instances are replaced by bb3, but bb2 remains
# in place.
clone = linear.with_building_blocks({
bb1: bb3,
})
"""
def __init__(
self,
building_block_vertices,
edges,
reaction_factory,
construction_stages,
num_processes,
optimizer,
edge_groups=None,
):
"""
Initialize an instance of :class:`.TopologyGraph`.
Parameters
----------
building_block_vertices : :class:`dict`
Maps each :class:`.BuildingBlock` to be placed, to a
:class:`tuple` of :class:`.Vertex` instances, on which
it should be placed.
edges : :class:`tuple` of :class:`.Edge`
The edges which make up the topology graph.
reaction_factory : :class:`.ReactionFactory`
Used to pick which :class:`.Reaction` is used on each
:class:`.EdgeGroup` of the topology graph.
construction_stages : :class:`tuple` of :class:`callable`
A collection of callables, each of which takes a
:class:`.Vertex` and returns ``True`` or ``False``.
If the first :class:`callable` is applied to a vertex in
the topology graph, and the result is ``True``, that vertex
is a part of the first construction stage. The second
:class:`callable` is then applied to all vertices not in
the first stage and those which return ``True`` belong to
the second stage and so on.
Vertices which belong to the same construction stage
all place building blocks together in parallel, before
placement is done by any vertices which are part of a later
stage. This breaks down parallel construction into
serial stages if synchronization between stages is needed.
If the topology graph is performing construction serially,
then all vertices which belong to an earlier stage will
place their building block before those at a later stage.
num_processes : :class:`int`
The number of parallel processes to create during
:meth:`construct`.
optimizer : :class:`.Optimizer`
Used to optimize the structure of the constructed
molecule.
edge_groups : :class:`tuple` of :class:`.EdgeGroup`, optional
The edge groups of the topology graph, if ``None``, every
:class:`.Edge` is in its own edge group.
"""
self._scale = scale = self._get_scale(building_block_vertices)
def apply_scale(item):
return item.with_scale(scale)
self._building_block_vertices = {
building_block: tuple(map(apply_scale, vertices))
for building_block, vertices
in building_block_vertices.items()
}
self._edges = tuple(map(apply_scale, edges))
self._reaction_factory = reaction_factory
if num_processes == 1:
self._implementation = _Serial(
stages=tuple(self._get_stages(construction_stages)),
)
else:
self._implementation = _Parallel(
stages=tuple(self._get_stages(construction_stages)),
num_processes=num_processes,
)
if edge_groups is None:
edge_groups = tuple(
EdgeGroup((edge, )) for edge in self._edges
)
self._edge_groups = edge_groups
self._optimizer = optimizer
def _with_building_blocks(self, building_block_map):
"""
Modify the topology graph.
"""
# The original scaling first needs to be removed, so that when
# the scale is recalculated with the new building blocks, it
# has the same starting geometry.
def undo_scale(vertex):
return vertex.with_scale(1/self._scale)
building_block_vertices = {
building_block_map.get(building_block, building_block):
tuple(map(undo_scale, vertices))
for building_block, vertices
in self._building_block_vertices.items()
}
scale = self._get_scale(building_block_vertices)
def scale_vertex(vertex):
return vertex.with_scale(scale)
self._building_block_vertices = {
building_block: tuple(map(scale_vertex, vertices))
for building_block, vertices
in building_block_vertices.items()
}
def scale_edge(edge):
# Remove the old scale and apply the new one.
return edge.with_scale(scale/self._scale)
self._edges = edges = tuple(map(scale_edge, self._edges))
def get_new_edge(edge_id):
return edges[edge_id]
self._edge_groups = tuple(
EdgeGroup(map(get_new_edge, edge_group.get_edge_ids()))
for edge_group in self._edge_groups
)
self._scale = scale
return self
def with_building_blocks(self, building_block_map):
"""
Return a clone holding different building blocks.
Parameters
----------
building_block_map : :class:`dict`
Maps a building block in the current topology
graph to the building block which should replace
it in the clone. If a building block should not be replaced
in the clone, it can be omitted from the map.
Returns
-------
:class:`.TopologyGraph`
The clone. Has the same type as the original topology
graph.
"""
return self.clone()._with_building_blocks(building_block_map)
def clone(self):
"""
Return a clone.
Returns
-------
:class:`.TopologyGraph`
The clone. Has the same type as the original topology
graph.
"""
clone = self.__class__.__new__(self.__class__)
clone._scale = self._scale
clone._building_block_vertices = dict(
self._building_block_vertices
)
clone._edges = self._edges
clone._reaction_factory = self._reaction_factory
clone._implementation = self._implementation
clone._optimizer = self._optimizer
clone._edge_groups = self._edge_groups
return clone
def get_building_blocks(self):
"""
Yield the building blocks.
Building blocks are yielded in an order based
: float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
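# (the subtraction removes self-pairs on the diagonal; dividing by 2 counts
# unordered frame pairs)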
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def rand_index(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
"""(Non-adjusted) Rand index.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> rand_index = mir_eval.structure.rand_index(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
rand_index : float > 0
Rand index
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
# Find where they agree
matches_pos = np.logical_and(agree_ref, agree_est)
# Find where they disagree
matches_neg = np.logical_and(~agree_ref, ~agree_est)
n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
n_matches_neg = matches_neg.sum() / 2.0
rand = (n_matches_pos + n_matches_neg) / n_pairs
return rand
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
"""Compute the Rand index, adjusted for change.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.misc.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.misc.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.misc.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.misc.comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
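# ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex): sum_comb is the raw
# index, prod_comb its expected value under random labelings, and mean_comb the
# maximum attainable index.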
return ((sum_comb - prod_comb)/(mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
"""Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,
... est_intervals, est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
Returns
-------
ari_score : float > 0
Adjusted Rand index between segmentations.
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
"""Compute the mutual information between two sequence labelings, adjusted for
chance.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ami : float <= 1.0
Mutual information
.. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
and sklearn.metrics.cluster.expected_mutual_info_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# The following code is based on
# sklearn.metrics.cluster.expected_mutual_information
R, C = contingency.shape
N = float(n_samples)
a = np.sum(contingency, axis=1).astype(np.int32)
b = np.sum(contingency, axis=0).astype(np.int32)
# There are three major terms to the EMI equation, which are multiplied to
# and then
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,
model_name="densenet40_k36_bc_cifar10", **kwargs)
def densenet40_k36_bc_cifar100(classes=100, **kwargs):
"""
DenseNet-BC-40 (k=36) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,
model_name="densenet40_k36_bc_cifar100", **kwargs)
def densenet40_k36_bc_svhn(classes=10, **kwargs):
"""
DenseNet-BC-40 (k=36) model for SVHN from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,
model_name="densenet40_k36_bc_svhn", **kwargs)
def densenet100_k12_cifar10(classes=10, **kwargs):
"""
DenseNet-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False,
model_name="densenet100_k12_cifar10", **kwargs)
def densenet100_k12_cifar100(classes=100, **kwargs):
"""
DenseNet-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False,
model_name="densenet100_k12_cifar100", **kwargs)
def densenet100_k12_svhn(classes=10, **kwargs):
"""
DenseNet-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False,
model_name="densenet100_k12_svhn", **kwargs)
def densenet100_k24_cifar10(classes=10, **kwargs):
"""
DenseNet-100 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False,
model_name="densenet100_k24_cifar10", **kwargs)
def densenet100_k24_cifar100(classes=100, **kwargs):
"""
DenseNet-100 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False,
model_name="densenet100_k24_cifar100", **kwargs)
def densenet100_k24_svhn(classes=10, **kwargs):
"""
DenseNet-100 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False,
model_name="densenet100_k24_svhn", **kwargs)
def densenet100_k12_bc_cifar10(classes=10, **kwargs):
"""
DenseNet-BC-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True,
model_name="densenet100_k12_bc_cifar10", **kwargs)
def densenet100_k12_bc_cifar100(classes=100, **kwargs):
"""
DenseNet-BC-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True,
model_name="densenet100_k12_bc_cifar100", **kwargs)
def densenet100_k12_bc_svhn(classes=10, **kwargs):
"""
DenseNet-BC-100 (k=12) model for SVHN from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True,
model_name="densenet100_k12_bc_svhn", **kwargs)
def densenet190_k40_bc_cifar10(classes=10, **kwargs):
"""
DenseNet-BC-190 (k=40) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True,
model_name="densenet190_k40_bc_cifar10", **kwargs)
def densenet190_k40_bc_cifar100(classes=100, **kwargs):
"""
DenseNet-BC-190 (k=40) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True,
model_name="densenet190_k40_bc_cifar100", **kwargs)
def densenet190_k40_bc_svhn(classes=10, **kwargs):
"""
DenseNet-BC-190 (k=40) model for SVHN from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True,
model_name="densenet190_k40_bc_svhn", **kwargs)
def densenet250_k24_bc_cifar10(classes=10, **kwargs):
"""
DenseNet-BC-250 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True,
model_name="densenet250_k24_bc_cifar10", **kwargs)
def densenet250_k24_bc_cifar100(classes=100, **kwargs):
"""
DenseNet-BC-250 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True,
model_name="densenet250_k24_bc_cifar100", **kwargs)
def densenet250_k24_bc_svhn(classes=10, **kwargs):
"""
DenseNet-BC-250 (k=24) model for SVHN from 'Densely Connected Convolutional Networks,'
https://arxiv.org/abs/1608.06993.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True,
model_name="densenet250_k24_bc_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(densenet40_k12_cifar10, 10),
(densenet40_k12_cifar100, 100),
(densenet40_k12_svhn, 10),
(densenet40_k12_bc_cifar10, 10),
(flow_out['var_value'] - flow_in['var_value'])
losses = losses.reset_index()
return losses
def aggregate_storage_capacities(oemoflex_scalars):
storage = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['storage_capacity', 'storage_capacity_invest'])].copy()
# Make sure that values in columns used to group on are strings and thus equatable
storage[basic_columns] = storage[basic_columns].astype(str)
storage = storage.groupby(by=basic_columns, as_index=False).sum()
storage['var_name'] = 'storage_capacity_sum'
storage['var_value'] = storage['var_value'] * 1e-3 # MWh -> GWh
storage['var_unit'] = 'GWh'
charge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_charge', 'capacity_charge_invest'])]
charge = charge.groupby(by=basic_columns, as_index=False).sum()
charge['var_name'] = 'capacity_charge_sum'
charge['var_unit'] = 'MW'
discharge = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity_discharge', 'capacity_discharge_invest'])]
discharge = discharge.groupby(by=basic_columns, as_index=False).sum()
discharge['var_name'] = 'capacity_discharge_sum'
discharge['var_unit'] = 'MW'
return pd.concat([storage, charge, discharge])
def aggregate_other_capacities(oemoflex_scalars):
capacities = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(['capacity', 'invest'])
].copy()
# Make sure that values in columns used to group on are strings and thus equatable
capacities[basic_columns] = capacities[basic_columns].astype(str)
capacities = capacities.groupby(by=basic_columns, as_index=False).sum()
capacities['var_name'] = 'capacity_sum'
capacities['var_unit'] = 'MW'
return capacities
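def _example_capacity_aggregation():
    # Illustrative sketch only, not part of the original module: reproduces the
    # groupby/sum pattern used above on a toy frame, grouping on a made-up
    # subset of columns instead of `basic_columns`. Assumes pandas is imported
    # as pd at the top of this module, as the rest of the code implies.
    toy = pd.DataFrame({
        'region': ['DE', 'DE', 'FR'],
        'tech': ['onshore', 'onshore', 'onshore'],
        'var_name': ['capacity', 'invest', 'capacity'],
        'var_value': [100.0, 20.0, 50.0],
    })
    agg = toy.groupby(by=['region', 'tech'], as_index=False)['var_value'].sum()
    agg['var_name'] = 'capacity_sum'
    agg['var_unit'] = 'MW'
    return agg  # DE -> 120.0, FR -> 50.0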
def get_emissions(oemoflex_scalars, scalars_raw):
try:
emissions = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'cost_emission'].copy()
except KeyError:
logging.info("No key 'cost_emissions' found to calculate 'emissions'.")
return None
price_emission = get_parameter_values(scalars_raw, 'Energy_Price_CO2')
emissions['var_value'] *= 1/price_emission
emissions['var_name'] = 'emissions'
emissions['var_unit'] = 'tCO2'
return emissions
def map_link_direction(oemoflex_scalars):
r"""Swaps name and region for backward flows of links."""
backward = (
(oemoflex_scalars['type'] == 'link') &
(oemoflex_scalars['var_name'].str.contains('backward'))
)
def swap(series, delimiter):
return series.str.split(delimiter).apply(lambda x: delimiter.join(x[::-1]))
def drop_regex(series, regex):
return series.str.replace(regex, '', regex=True)
oemoflex_scalars.loc[backward, 'name'] = swap(oemoflex_scalars.loc[backward, 'name'], '-')
oemoflex_scalars.loc[backward, 'region'] = swap(oemoflex_scalars.loc[backward, 'region'], '_')
oemoflex_scalars.loc[:, 'var_name'] = drop_regex(
oemoflex_scalars.loc[:, 'var_name'], '.backward|.forward'
)
return oemoflex_scalars
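def _example_map_link_direction():
    # Illustrative sketch only, not part of the original module: shows what the
    # swap()/drop_regex() helpers above do to a single backward link row
    # (all values made up).
    name = pd.Series(['AT-DE'])
    region = pd.Series(['AT_DE'])
    var_name = pd.Series(['flow_net_backward'])
    swapped_name = name.str.split('-').apply(lambda x: '-'.join(x[::-1]))      # 'DE-AT'
    swapped_region = region.str.split('_').apply(lambda x: '_'.join(x[::-1]))  # 'DE_AT'
    cleaned = var_name.str.replace('.backward|.forward', '', regex=True)       # 'flow_net'
    return swapped_name, swapped_region, cleaned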
def map_to_flexmex_results(oemoflex_scalars, flexmex_scalars_template, mapping, scenario):
mapping = mapping.set_index('Parameter')
flexmex_scalars = flexmex_scalars_template.copy()
oemoflex_scalars = oemoflex_scalars.set_index(['region', 'carrier', 'tech', 'var_name'])
oemoflex_scalars.loc[oemoflex_scalars['var_unit'] == 'MWh', 'var_value'] *= 1e-3 # MWh to GWh
for i, row in flexmex_scalars.loc[flexmex_scalars['UseCase'] == scenario].iterrows():
try:
select = mapping.loc[row['Parameter'], :]
except KeyError:
continue
try:
value = oemoflex_scalars.loc[
(row['Region'],
select['carrier'],
select['tech'],
select['var_name']), 'var_value']
except KeyError:
logging.info(
f"No key "
f"{(row['Region'], select['carrier'], select['tech'], select['var_name'])}"
f"found to be mapped to FlexMex."
)
continue
if isinstance(value, float):
flexmex_scalars.loc[i, 'Value'] = np.around(value)
flexmex_scalars.loc[:, 'Modell'] = 'oemof'
return flexmex_scalars
def get_varom_cost(oemoflex_scalars, prep_elements):
r"""
Calculates the VarOM cost by multiplying consumption by marginal cost.
Which value is taken as consumption depends on the actual technology type.
Parameters
----------
oemoflex_scalars
prep_elements
Returns
-------
"""
varom_cost = []
for prep_el in prep_elements.values():
if 'marginal_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] == 'excess':
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
elif prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_electricity']
elif prep_el['type'][0] in ['link', 'electrical line']:
net_flows = ['flow_net_forward', 'flow_net_backward']
flow = oemoflex_scalars.loc[
oemoflex_scalars['var_name'].isin(net_flows)]
flow = flow.groupby(basic_columns, as_index=False).sum()
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_out']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['marginal_cost']
df['var_name'] = 'cost_varom'
varom_cost.append(df)
varom_cost = pd.concat(varom_cost)
varom_cost['var_unit'] = 'Eur'
return varom_cost
def get_carrier_cost(oemoflex_scalars, prep_elements):
carrier_cost = []
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
df = prep_el[basic_columns]
if prep_el['type'][0] in ['backpressure', 'extraction']:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_fuel']
else:
flow = oemoflex_scalars.loc[oemoflex_scalars['var_name'] == 'flow_in']
df = pd.merge(
df, flow,
on=basic_columns
)
df['var_value'] = df['var_value'] * prep_el['carrier_cost']
df['var_name'] = 'cost_carrier'
carrier_cost.append(df)
if carrier_cost:
carrier_cost = pd.concat(carrier_cost)
else:
carrier_cost = pd.DataFrame(carrier_cost)
carrier_cost['var_unit'] = 'Eur'
return carrier_cost
def get_fuel_cost(oemoflex_scalars, prep_elements, scalars_raw):
r"""
Re-calculates the fuel costs from the carrier costs if there are CO2 emissions.
Bypass for non-emission carriers (cost_carrier = cost_fuel).
Having emissions or not is determined by the parameter mapping dict (emission_factor).
TODO Let's think about using the 'flow' values as input because this way we could
generalize the structure with get_varom_cost() and get_emission_cost() into one function
for all 'flow'-derived values.
Parameters
----------
oemoflex_scalars
prep_elements
scalars_raw
Returns
-------
"""
fuel_cost = pd.DataFrame()
# Iterate over oemof.tabular components (technologies)
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
# Set up a list of the current technology's elements
df = prep_el.loc[:, basic_columns]
# Select carriers from the parameter map
carrier_name = prep_el['carrier'][0]
parameters = FlexMex_Parameter_Map['carrier'][carrier_name]
# Only re-calculate if there is a CO2 emission
if 'emission_factor' in parameters.keys():
price_carrier = get_parameter_values(scalars_raw, parameters['carrier_price'])
price_emission = get_parameter_values(scalars_raw, parameters['co2_price'])\
* get_parameter_values(scalars_raw, parameters['emission_factor'])
factor = price_carrier / (price_carrier + price_emission)
# Otherwise take the carrier cost value for the fuel cost
else:
factor = 1.0
df = get_calculated_parameters(df, oemoflex_scalars, 'cost_carrier', factor)
# Update other columns
df['var_name'] = 'cost_fuel'
df['var_unit'] = 'Eur'
# Append current technology elements to the return DataFrame
fuel_cost = pd.concat([fuel_cost, df])
return fuel_cost
def get_emission_cost(oemoflex_scalars, prep_elements, scalars_raw):
r"""
Re-calculates the emission costs from the carrier costs if there are CO2 emissions.
Structure only slightly different (+ else branch) from get_fuel_cost() because there are costs
of zero instead of the fuel costs (in get_fuel_cost()) if there are no emissions.
Parameters
----------
oemoflex_scalars
prep_elements
scalars_raw
Returns
-------
"""
emission_cost = pd.DataFrame()
# Iterate over oemof.tabular components (technologies)
for prep_el in prep_elements.values():
if 'carrier_cost' in prep_el.columns:
# Set up a list of the current technology's elements
df = prep_el.loc[:, basic_columns]
# Select carriers from the parameter map
carrier_name = prep_el['carrier'][0]
parameters = FlexMex_Parameter_Map['carrier'][carrier_name]
# Only re-calculate if there is a CO2 emission
if 'emission_factor' in parameters.keys():
price_carrier = get_parameter_values(scalars_raw, parameters['carrier_price'])
price_emission = get_parameter_values(scalars_raw, parameters['co2_price']) \
* get_parameter_values(scalars_raw, parameters['emission_factor'])
factor = price_emission / (price_carrier + price_emission)
df = get_calculated_parameters(df, oemoflex_scalars, 'cost_carrier', factor)
else:
df['var_value'] = 0.0
# Update other columns
df['var_name'] = 'cost_emission'
df['var_unit'] = 'Eur'
# Append current technology elements to the return DataFrame
emission_cost = pd.concat([emission_cost, df])
return emission_cost
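# Worked numeric example (illustrative only, all values made up): with a carrier
# price of 20 Eur/MWh, a CO2 price of 25 Eur/t and an emission factor of
# 0.2 t/MWh, the emission share of the carrier cost is
#   price_emission  = 25 * 0.2 = 5 Eur/MWh
#   fuel factor     = 20 / (20 + 5) = 0.8   (used in get_fuel_cost)
#   emission factor =  5 / (20 + 5) = 0.2   (used in get_emission_cost)
# so cost_fuel and cost_emission split cost_carrier in a 4:1 ratio.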
def get_calculated_parameters(df, oemoflex_scalars, parameter_name, factor):
r"""
Takes the pre-calculated parameter 'parameter_name' from
'oemoflex_scalars' DataFrame and returns it multiplied by 'factor' (element-wise)
with 'df' as a template
Parameters
----------
df
output template DataFrame
oemoflex_scalars
DataFrame with pre-calculated parameters
parameter_name
parameter to manipulate
factor
factor to multiply parameter with
Returns
-------
"""
calculated_parameters = oemoflex_scalars.loc[
oemoflex_scalars['var_name'] == parameter_name].copy()
if calculated_parameters.empty:
logging.info("No key '{}' found as input"
"for postprocessing calculation.".format(parameter_name))
# Make sure that values in columns to merge on are strings
# See here:
# https://stackoverflow.com/questions/39582984/pandas-merging-on-string-columns-not-working-bug
calculated_parameters[basic_columns] = calculated_parameters[basic_columns].astype(str)
df = pd.merge(
df, calculated_parameters,
on=basic_columns
)
df['var_value'] = df['var_value'] * factor
return df
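def _example_get_calculated_parameters():
    # Illustrative sketch only, not part of the original module: demonstrates the
    # merge-and-scale pattern of get_calculated_parameters() with made-up columns
    # standing in for `basic_columns`.
    template = pd.DataFrame({'region': ['DE'], 'tech': ['gt']})
    scalars = pd.DataFrame({
        'region': ['DE', 'FR'],
        'tech': ['gt', 'gt'],
        'var_name': ['cost_carrier', 'cost_carrier'],
        'var_value': [100.0, 80.0],
    })
    selected = scalars.loc[scalars['var_name'] == 'cost_carrier'].copy()
    out = pd.merge(template, selected, on=['region', 'tech'])
    out['var_value'] = out['var_value'] * 0.8
    return out  # one row for DE with var_value == 80.0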
def get_invest_cost(oemoflex_scalars, prep_elements, scalars_raw):
invest_cost = pd.DataFrame()
for prep_el in prep_elements.values():
# In the following line: Not 'is'! pandas overloads operators!
if 'expandable' in prep_el.columns and prep_el['expandable'][0] == True: # noqa: E712, E501 # pylint: disable=C0121
# element is expandable --> 'invest' values exist
df = prep_el[basic_columns]
tech_name = prep_el['tech'][0]
parameters = FlexMex_Parameter_Map['tech'][tech_name]
interest = get_parameter_values(
scalars_raw,
'EnergyConversion_InterestRate_ALL') * 1e-2 # percent -> 0...1
# Special treatment for storages
if tech_name in ['h2_cavern', 'liion_battery']:
# Charge device
capex = get_parameter_values(scalars_raw, parameters['charge_capex'])
lifetime = get_parameter_values(scalars_raw, parameters['charge_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_charge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_charge_invest',
annualized_cost)
# Discharge device
capex = get_parameter_values(scalars_raw, parameters['discharge_capex'])
lifetime = get_parameter_values(scalars_raw, parameters['discharge_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_discharge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_discharge_invest',
annualized_cost)
# Storage cavern
capex = get_parameter_values(scalars_raw,
parameters['storage_capex']) * 1e-3 # €/MWh -> €/GWh
lifetime = get_parameter_values(scalars_raw, parameters['storage_lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df_storage = get_calculated_parameters(df, oemoflex_scalars,
'storage_capacity_invest',
annualized_cost)
df = pd.concat([df_charge, df_discharge, df_storage])
# Sum the 3 amounts per storage, keep indexes as columns
df = df.groupby(by=basic_columns, as_index=False).sum()
else:
capex = get_parameter_values(scalars_raw, parameters['capex'])
lifetime = get_parameter_values(scalars_raw, parameters['lifetime'])
annualized_cost = annuity(capex=capex, n=lifetime, wacc=interest)
df = get_calculated_parameters(df, oemoflex_scalars, 'invest', annualized_cost)
df['var_name'] = 'cost_invest'
df['var_unit'] = 'Eur'
invest_cost = pd.concat([invest_cost, df])
return invest_cost
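def _annuity_sketch(capex, n, wacc):
    # Illustrative sketch only, not part of the original module: the standard
    # annuity formula that a helper like `annuity(capex, n, wacc)` (imported
    # elsewhere in this module) is assumed to implement.
    # Example: _annuity_sketch(1000.0, 20, 0.05) is roughly 80.2 per year.
    return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1)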
def get_fixom_cost(oemoflex_scalars, prep_elements, scalars_raw):
fixom_cost = pd.DataFrame()
for prep_el in prep_elements.values():
# not 'is'! pandas overloads operators!
if 'expandable' in prep_el.columns and prep_el['expandable'][0] == True: # noqa: E712, E501 # pylint: disable=C0121
# element is expandable --> 'invest' values exist
df = prep_el[basic_columns]
tech_name = prep_el['tech'][0]
parameters = FlexMex_Parameter_Map['tech'][tech_name]
# Special treatment for storages
if tech_name in ['h2_cavern', 'liion_battery']:
# One fix cost factor for all sub-components
fix_cost_factor = get_parameter_values(
scalars_raw, parameters['fixom']) * 1e-2 # percent -> 0...1
# Charge device
capex = get_parameter_values(scalars_raw, parameters['charge_capex'])
df_charge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_charge_invest',
fix_cost_factor * capex)
# Discharge device
capex = get_parameter_values(scalars_raw, parameters['discharge_capex'])
df_discharge = get_calculated_parameters(df, oemoflex_scalars,
'capacity_discharge_invest',
fix_cost_factor * capex)
# Storage cavern
capex = get_parameter_values(scalars_raw,
parameters['storage_capex']) * 1e-3 # €/MWh -> €/GWh
df_storage = get_calculated_parameters(df, oemoflex_scalars,
'storage_capacity_invest',
fix_cost_factor * capex)
df = pd.concat([df_charge, df_discharge, df_storage])
# Sum the 3 amounts per storage, keep indexes as columns
df = df.groupby(by=basic_columns, as_index=False).sum()
else:
capex = get_parameter_values(scalars_raw, parameters['capex'])
fix_cost_factor = get_parameter_values(
scalars_raw,
<filename>kdcovid/search_tool.py
import csv
import pickle
import time
from string import punctuation
import re
import numpy as np
import sent2vec
import torch
from absl import logging
from nltk import word_tokenize
from nltk.corpus import stopwords
from spacy import displacy
from dateutil import parser as dateparser
logging.set_verbosity(logging.INFO)
DEFAULT_DATE = "2019"
covid_strings = ["covid-19", "covid19", "covid", "sars-cov-2",
"sars-cov2", "sarscov2", "novel coronavirus",
"2019-ncov", "2019ncov"]
patterns = [re.compile(s, re.IGNORECASE) for s in covid_strings]
def _check_covid(paper):
if any(re.search(p, paper["abstract"]) for p in patterns):
return True
return False
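# Illustrative examples (added for demonstration, not part of the original file)
# of what _check_covid() matches; it only inspects the 'abstract' field:
#   _check_covid({'abstract': 'Transmission dynamics of SARS-CoV-2'})  -> True
#   _check_covid({'abstract': 'Seasonal influenza vaccination rates'}) -> False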
class SearchTool(object):
def __init__(self, data_dir='./', use_cached=False, paper_id_field='cord_uid', all_vecs=None, all_meta=None,
model=None, metadata_file=None, documents=None, entity_links=None, cached_result_file=None):
t_start = time.time()
self.cached_results = None
if use_cached:
with open('%s/cached_results.pkl' % data_dir, 'rb') as fin:
self.cached_results = pickle.load(fin)
elif all_vecs is not None:
self.all_vecs = all_vecs
self.all_meta = all_meta
self.model = model
t = time.time()
logging.info('Loading Paper Meta Data...')
self.paper_id_field = paper_id_field
self.paper_index = {}
num_covid = 0
with open(metadata_file) as f:
reader = csv.DictReader(f, delimiter=',')
for paper in reader:
self.paper_index[paper[self.paper_id_field]] = paper
self.paper_index[paper[self.paper_id_field]]['covid'] = _check_covid(paper)
try:
self.paper_index[paper[self.paper_id_field]]['date'] = dateparser.parse(paper['publish_time'])
except:
self.paper_index[paper[self.paper_id_field]]['date'] = dateparser.parse(DEFAULT_DATE)
num_covid += int(self.paper_index[paper[self.paper_id_field]]['covid'])
logging.info("Found %d covid papers from %d total" %
(num_covid, len(self.paper_index)))
logging.info('Loading Paper Meta Data...Done! %s seconds' % (time.time() - t))
self.doc2sec2text = documents
self.entity_links = entity_links
if cached_result_file is not None:
with open('%s/cached_results.pkl' % data_dir, 'rb') as fin:
self.cached_results = pickle.load(fin)
else:
logging.info('Using data dir %s', data_dir)
self.data_dir = data_dir
t = time.time()
logging.info('Loading Paper Meta Data...')
self.paper_id_field = paper_id_field
self.paper_index = {}
num_covid = 0
with open('%s/metadata.csv' % data_dir) as f:
reader = csv.DictReader(f, delimiter=',')
for paper in reader:
self.paper_index[paper[self.paper_id_field]] = paper
self.paper_index[paper[self.paper_id_field]]['covid'] = _check_covid(paper)
try:
self.paper_index[paper[self.paper_id_field]]['date'] = dateparser.parse(
paper['publish_time'])
except:
self.paper_index[paper[self.paper_id_field]]['date'] = dateparser.parse(DEFAULT_DATE)
num_covid += int(self.paper_index[paper[self.paper_id_field]]['covid'])
logging.info("Found %d covid papers from %d total" %
(num_covid, len(self.paper_index)))
logging.info('Loading Paper Meta Data...Done! %s seconds' % (time.time() - t))
t = time.time()
logging.info('Loading section text...')
with open('%s/all_sections.pkl' % data_dir, 'rb') as fin:
self.doc2sec2text = pickle.load(fin)
logging.info('Loading section text...Done! %s seconds' % (time.time() - t))
t = time.time()
logging.info('Loading entity links...')
with open('%s/combined_links.pickle' % data_dir, 'rb') as fin:
self.entity_links = pickle.load(fin)
logging.info('Loading entity links...Done! %s seconds' % (time.time() - t))
logging.info('Loading sentence vectors...')
t = time.time()
self.all_vecs = np.load('%s/all.npy' % data_dir)
with open('%s/all.pkl' % data_dir, 'rb') as fin:
self.all_meta = pickle.load(fin)
logging.info("%s", self.all_meta[0:5])
logging.info('Loading sentence vectors... done! %s seconds' % (time.time() - t))
logging.info('Unit norming the vectors')
t = time.time()
norms = np.linalg.norm(self.all_vecs, axis=1, keepdims=True)
norms[norms == 0] = 1
self.all_vecs /= norms
self.all_vecs = torch.from_numpy(self.all_vecs).detach()
logging.info('Done unit norming the vectors! %s seconds' % (time.time() - t))
t = time.time()
logging.info('Loading BioSentVec Model...')
model_path = '%s/BioSentVec_PubMed_MIMICIII-bigram_d700.bin' % data_dir
self.model = sent2vec.Sent2vecModel()
try:
self.model.load_model(model_path)
except Exception as e:
print(e)
logging.info('Loading BioSentVec Model... done! %s seconds' % (time.time() - t))
self.colors = {'Highlight': 'linear-gradient(90deg, #aa9cfc, #fc9ce7)', 'disease': '#ffe4b5', 'gene': '#ffa07a'}
self.stop_words = set(stopwords.words('english'))
logging.info('Finished setting up constructor in %s seconds' % (time.time() - t_start))
def preprocess_sentence(self, text):
text = text.replace('/', ' / ')
text = text.replace('.-', ' .- ')
text = text.replace('.', ' . ')
text = text.replace('\'', ' \' ')
text = text.lower()
tokens = [token for token in word_tokenize(text) if token not in punctuation and token not in self.stop_words]
return ' '.join(tokens)
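# Illustrative example (added for demonstration, not part of the original file):
# with the NLTK English stopwords loaded above,
# preprocess_sentence("What is the incubation period of the virus?")
# yields roughly "incubation period virus" - lower-cased, with punctuation and
# stopwords removed - before the sentence is embedded by BioSentVec.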
def knn(self, query_vectors, base_vectors, query_metadata, base_metadata, batch_size=1000, K=200):
t = time.time()
nn = dict()
for i in range(0, query_vectors.shape[0], batch_size):
topk = torch.topk(torch.matmul(query_vectors[i:(i + batch_size)], base_vectors.transpose(1, 0)), k=K, dim=1)
distances, indices = topk[0].cpu().numpy(), topk[1].cpu().numpy()
for j in range(distances.shape[0]):
qr_key = query_metadata[i + j][-1]  # key results by the query for this row of the batch
nn[qr_key] = [{'doc_id': base_metadata[x][0].replace('.json', ''), 'sent_text': base_metadata[x][3],
'sent_no': base_metadata[x][2],
'sec_id': base_metadata[x][1], 'sim': distances[j, idx]} for idx, x in
enumerate(indices[j])]
logging.info('Finished %s out of %s in %s', i, query_vectors.shape[0], time.time() - t)
del topk
del distances
del indices
logging.info('Done! %s', time.time() - t)
return nn
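# Note added for illustration (not part of the original file): the corpus
# vectors are unit-normed in __init__, so the matrix product above ranks
# sentences by cosine similarity with the query (the query's own norm only
# scales every score by the same constant). For two toy unit vectors:
#   a = torch.tensor([[1.0, 0.0]]); b = torch.tensor([[0.6, 0.8]])
#   torch.matmul(a, b.transpose(1, 0))  # -> 0.6, the cosine of their angle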
def get_entity_base(self, color, link):
return """
<mark class="entity" style="background: {bg}; padding: 0.15em 0.15em; margin: 0 0.25em; line-height: 1.5; border-radius: 0.15em">
{text}
<span style="font-size: 0.8em; font-weight: bold; line-height: 1.5; border-radius: 0.15em; text-transform: uppercase; vertical-align: middle; margin-right: 0.15rem"><a href="{link}" target="_blank" style="text-decoration: none; color: black;">{label}</a></span>
</mark>
""".replace("{bg}", color).replace("{link}", link)
def get_entity_string(self, text, color, label, link):
return self.get_entity_base(color, link).replace("{text}", text).replace("{label}", label)
def get_highlight_base(self, color):
return """
<mark class="entity" style="background: {bg}; padding: 0.15em 0.15em; margin: 0 0.25em; line-height: 1.5; border-radius: 0.15em">
{text}
<span style="font-size: 0.8em; font-weight: bold; line-height: 1.5; border-radius: 0.15em; text-transform: uppercase; vertical-align: middle; margin-right: 0.15rem">{label}</span>
</mark>
""".replace("{bg}", color)
def get_highlight_string(self, text, color, label):
return self.get_highlight_base(color).replace("{text}", text).replace("{label}", label)
def highlight_texts(self, larger_text, entities, highlights, colors):
# spans = [(start, end, link, type), ...]
entities = sorted(entities, key=lambda x: (x[0], x[1]))
highlights = sorted(highlights, key=lambda x: (x[0], x[1]))
while entities and highlights:
last_e = entities[-1]
last_h = highlights[-1]
# if e is candidate and is not overlapping
if last_e[0] > last_h[1]:
l = """<i class="fa"></i>"""
s, e, t, link = last_e
color = colors[t]
rs = self.get_entity_string(larger_text[s:e], color, l, link)
larger_text = larger_text[:s] + rs + larger_text[e:]
entities.pop()
elif last_e[1] > last_h[0]:
l = """<i class="fa"></i>"""
s, e, t, link = last_e
color = colors[t]
rs = self.get_entity_string(larger_text[s:e], color, l, link)
larger_text = larger_text[:s] + rs + larger_text[e:]
entities.pop()
last_h[1] += len(rs) - (e - s)
else: # h goes
l = 'Highlight'
s, e, t, link = last_h
color = colors[t]
rs = self.get_highlight_string(larger_text[s:e], color, l)
larger_text = larger_text[:s] + rs + larger_text[e:]
highlights.pop()
while entities:
l = """<i class="fa"></i>"""
s, e, t, link = entities.pop()
color = colors[t]
rs = self.get_entity_string(larger_text[s:e], color, l, link)
larger_text = larger_text[:s] + rs + larger_text[e:]
while highlights:
l = 'Highlight'
s, e, t, link = highlights.pop()
color = colors[t]
rs = self.get_highlight_string(larger_text[s:e], color, l)
larger_text = larger_text[:s] + rs + larger_text[e:]
return larger_text
def h(self, larger_text, smaller_texts):
ents = []
for smaller_text in smaller_texts:
start_offset = larger_text.find(smaller_text)
ents.append({"start": start_offset, "end": start_offset + len(smaller_text), "label": "HIGHLIGHT"})
colors = {"HIGHLIGHT": "linear-gradient(90deg, #aa9cfc, #fc9ce7)"}
ex = [{"text": larger_text,
"ents": ents,
"title": None,
"colors": colors}]
res = displacy.render(ex, style="ent", manual=True, options={"colors": colors}, page=True, jupyter=False)
return res
def format_html(self, sha, title, authors, year_of_publication, link, venue, sentences, sections, section_ids,
user_sent):
try:
alist = list(csv.reader([authors.strip().replace('[', '').replace(']', '')]))[0]
except:
logging.warning('Error parsing author string: %s', authors)
alist = []
if len(alist) > 10:
alist = alist[0:10] + ['et al']
authors = "; ".join([x.replace('\'', '') for x in alist])
# s = "<h2><b><a href=\"%s\" rel=\"noopener noreferrer\" target=\"_blank\">%s</a></b></h2><div class=\"wrap\"><div class=\"res_text\"><i>%s</i><BR/>%s<BR/>%s<BR/><BR/>" % (
# link, title, str(authors), year_of_publication, venue)
s = '<div class="wrap"><div class="res_text"><div class="paper-details"><span class="year">%s | %s</span><h2><b><a href="%s" rel="noopener noreferrer" target="_blank">%s<i class="fa"></i></a></b></h2><i>%s</i><BR/><BR/><BR/>' % (
year_of_publication, venue, link, title, str(authors))
sec2sent = dict()
# todo don't copy here...
secid2sec = dict()
for sent, sec, sec_id in zip(sentences, sections, section_ids):
if sec_id not in sec2sent:
sec2sent[sec_id] = []
sec2sent[sec_id].append(sent['sent_text'])
secid2sec[sec_id] = sec
for sec_id, sents in sec2sent.items():
sec = secid2sec[sec_id]
entity_spans = []
highlight_spans = []
for sent in sents:
start_offset = sec.find(sent)
if start_offset >= 0:
end_offset = start_offset + len(sent)
highlight_spans.append([start_offset, end_offset, 'Highlight', None])
# {sha: {para_id: [ {start: int, end: int, url: string} ] } }
if sha in self.entity_links:
entities = self.entity_links[sha][sec_id]
for ent in entities:
entity_spans.append([ent['start'], ent['end'], ent['type'], ent['url']])
else:
logging.warning('No links found for document %s', sha)
s += self.highlight_texts(sec, entity_spans, highlight_spans, self.colors)
s += '</div><div class="legend"><div><div class="circle yellow"></div><p>Disease</p></div><div><div class="circle orange"></div><p>Gene</p></div><div><div class="circle purple"></div><p>Text Matching Search</p></div></div>'
s += "</div>"
# print(sec)
# print(sents)
# Adding image part
s += '<div class="res_image"><h2>Gene-Disease Association</h2><p>Click on the gene/disease for more information</p><br><object data="gv_files/{}.gv.svg" type="image/svg+xml"></object><p></p><p class="cite"><br>Graph data from <a href="https://www.disgenet.org">DisGeNET v6.0</a></p></div>'.format(
sha)
# s += "<div class=\"res_image\"><img src=\"gv_files/{}.gv.svg\" alt=\"Mini-KB\" width=\"95%\"></div>".format(sha)
s += "</div>"
return s
def get_search_results(self, user_query, sort_by_date=False, covid_only=False, K=100, Kdocs=20):
if self.cached_results is not None:
logging.info('getting search results for %s, K=%s, Kdocs=%s', user_query, K, Kdocs)
return self.cached_results[user_query]
logging.info('getting search results for %s, K=%s, Kdocs=%s', user_query, K, Kdocs)
logging.info('starting embedding sentence %s, K=%s, Kdocs=%s', user_query, K, Kdocs)
v = self.model.embed_sentence(self.preprocess_sentence(user_query))
query_vecs = torch.from_numpy(v.astype(np.float32))
logging.info('finished embedding sentence %s, K=%s, Kdocs=%s', user_query, K, Kdocs)
query_meta = [('query', 0, 0, user_query)]
logging.info('starting nearest neighbors for %s, K=%s, Kdocs=%s', user_query, K, Kdocs)
nn = self.knn(query_vecs, self.all_vecs, query_meta, self.all_meta, K=K)
logging.info('found nearest neighbors for %s, K=%s, Kdocs=%s', user_query, K, Kdocs)
res = ""
for sent, nns in nn.items():
all_results
<reponame>morales-gregorio/elephant
"""
Gaussian-process factor analysis (GPFA) is a dimensionality reduction method
[#f1]_ for neural trajectory visualization of parallel spike trains. GPFA
applies factor analysis (FA) to time-binned spike count data to reduce the
dimensionality and at the same time smoothes the resulting low-dimensional
trajectories by fitting a Gaussian process (GP) model to them.
The input consists of a set of trials (Y), each containing a list of spike
trains (N neurons). The output is the projection (X) of the data in a space
of pre-chosen dimensionality x_dim < N.
Under the assumption of a linear relation (transform matrix C) between the
latent variable X following a Gaussian process and the spike train data Y with
a bias d and a noise term of zero mean and (co)variance R (i.e.,
:math:`Y = C X + d + Gauss(0,R)`), the projection corresponds to the
conditional probability E[X|Y].
The parameters (C, d, R) as well as the time scales and variances of the
Gaussian process are estimated from the data using an expectation-maximization
(EM) algorithm.
Internally, the analysis consists of the following steps:
0) bin the spike train data to get a sequence of N dimensional vectors of spike
counts in respective time bins, and choose the reduced dimensionality x_dim
1) expectation-maximization for fitting of the parameters C, d, R and the
time-scales and variances of the Gaussian process, using all the trials
provided as input (c.f., `gpfa_core.em()`)
2) projection of single trials in the low dimensional space (c.f.,
`gpfa_core.exact_inference_with_ll()`)
3) orthonormalization of the matrix C and the corresponding subspace, for
visualization purposes: (c.f., `gpfa_core.orthonormalize()`)
.. autosummary::
:toctree: toctree/gpfa
GPFA
Visualization
-------------
Visualization of GPFA transforms is covered in Viziphant:
https://viziphant.readthedocs.io/en/latest/modules.html
Tutorial
--------
:doc:`View tutorial <../tutorials/gpfa>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/gpfa.ipynb
References
----------
The code was ported from the MATLAB code based on Byron Yu's implementation.
The original MATLAB code is available at Byron Yu's website:
https://users.ece.cmu.edu/~byronyu/software.shtml
.. [#f1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2009)
Gaussian-process factor analysis for low-dimensional single-trial analysis
of neural population activity. J Neurophysiol 102:614-635.
:copyright: Copyright 2015-2019 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import neo
import numpy as np
import quantities as pq
import sklearn
import warnings
from elephant.gpfa import gpfa_core, gpfa_util
from elephant.utils import deprecated_alias
__all__ = [
"GPFA"
]
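def _toy_generative_model_sketch(n_units=20, x_dim=3, n_bins=50, seed=0):
    """
    Illustrative sketch only, not part of the original module: draws one toy
    trial from the assumed generative model Y = C X + d + Gauss(0, R), with a
    smooth latent X standing in for a Gaussian-process sample. All names and
    sizes here are made up for demonstration.
    """
    rng = np.random.default_rng(seed)
    C = rng.standard_normal((n_units, x_dim))          # loading matrix
    d = rng.standard_normal((n_units, 1))               # observation mean
    R = np.diag(rng.uniform(0.1, 0.5, size=n_units))    # diagonal noise covariance
    t = np.linspace(0, 1, n_bins)
    # crude smooth latents as a stand-in for GP samples
    X = np.stack([np.sin(2 * np.pi * (k + 1) * t) for k in range(x_dim)])
    noise = rng.multivariate_normal(np.zeros(n_units), R, size=n_bins).T
    Y = C @ X + d + noise
    return Y, X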
class GPFA(sklearn.base.BaseEstimator):
"""
Apply Gaussian process factor analysis (GPFA) to spike train data
There are two principle scenarios of using the GPFA analysis, both of which
can be performed in an instance of the GPFA() class.
In the first scenario, only one single dataset is used to fit the model and
to extract the neural trajectories. The parameters that describe the
transformation are first extracted from the data using the `fit()` method
of the GPFA class. Then the same data is projected into the orthonormal
basis using the method `transform()`. The `fit_transform()` method can be
used to perform these two steps at once.
In the second scenario, a single dataset is split into training and test
datasets. Here, the parameters are estimated from the training data. Then
the test data is projected into the low-dimensional space previously
obtained from the training data. This analysis is performed by executing
first the `fit()` method on the training data, followed by the
`transform()` method on the test dataset.
The GPFA class is compatible to the cross-validation functions of
`sklearn.model_selection`, such that users can perform cross-validation to
search for a set of parameters yielding best performance using these
functions.
Parameters
----------
x_dim : int, optional
state dimensionality
Default: 3
bin_size : pq.Quantity, optional
spike bin width
Default: 20 ms
min_var_frac : float, optional
fraction of overall data variance for each observed dimension to set as
the private variance floor. This is used to combat Heywood cases,
where ML parameter learning returns one or more zero private variances.
Default: 0.01
(See Martin & McDonald, Psychometrika, Dec 1975.)
em_tol : float, optional
stopping criterion for EM
Default: 1e-8
em_max_iters : int, optional
number of EM iterations to run
Default: 500
tau_init : pq.Quantity, optional
GP timescale initialization
Default: 100 ms
eps_init : float, optional
GP noise variance initialization
Default: 1e-3
freq_ll : int, optional
data likelihood is computed at every freq_ll EM iterations. freq_ll = 1
means that data likelihood is computed at every iteration.
Default: 5
verbose : bool, optional
specifies whether to display status messages
Default: False
Attributes
----------
valid_data_names : tuple of str
Names of the data contained in the resultant data structure, used to
check the validity of users' request
has_spikes_bool : np.ndarray of bool
Indicates if a neuron has any spikes across trials of the training
data.
params_estimated : dict
Estimated model parameters. Updated at each run of the fit() method.
covType : str
type of GP covariance, either 'rbf', 'tri', or 'logexp'.
Currently, only 'rbf' is supported.
gamma : (1, #latent_vars) np.ndarray
related to GP timescales of latent variables before
orthonormalization by :math:`bin_size / sqrt(gamma)`
eps : (1, #latent_vars) np.ndarray
GP noise variances
d : (#units, 1) np.ndarray
observation mean
C : (#units, #latent_vars) np.ndarray
loading matrix, representing the mapping between the neuronal data
space and the latent variable space
R : (#units, #units) np.ndarray
observation noise covariance
fit_info : dict
Information of the fitting process. Updated at each run of the fit()
method.
iteration_time : list
containing the runtime for each iteration step in the EM algorithm.
log_likelihoods : list
log likelihoods after each EM iteration.
transform_info : dict
Information of the transforming process. Updated at each run of the
transform() method.
log_likelihood : float
maximized likelihood of the transformed data
num_bins : nd.array
number of bins in each trial
Corth : (#units, #latent_vars) np.ndarray
mapping between the neuronal data space and the orthonormal
latent variable space
Methods
-------
fit
transform
fit_transform
score
Raises
------
ValueError
If `bin_size` or `tau_init` is not a `pq.Quantity`.
Examples
--------
In the following example, we calculate the neural trajectories of 20
independent Poisson spike trains recorded in 50 trials with randomized
rates up to 100 Hz.
>>> import numpy as np
>>> import quantities as pq
>>> from elephant.gpfa import GPFA
>>> from elephant.spike_train_generation import homogeneous_poisson_process
>>> data = []
>>> for trial in range(50):
...     n_channels = 20
...     firing_rates = np.random.randint(low=1, high=100,
...                                      size=n_channels) * pq.Hz
...     spike_times = [homogeneous_poisson_process(rate=rate)
...                    for rate in firing_rates]
...     data.append((trial, spike_times))
...
>>> gpfa = GPFA(bin_size=20*pq.ms, x_dim=8)
>>> gpfa.fit(data)
>>> results = gpfa.transform(data, returned_data=['latent_variable_orth',
... 'latent_variable'])
>>> latent_variable_orth = results['latent_variable_orth']
>>> latent_variable = results['latent_variable']
or simply
>>> results = GPFA(bin_size=20*pq.ms, x_dim=8).fit_transform(data,
... returned_data=['latent_variable_orth',
... 'latent_variable'])
"""
@deprecated_alias(binsize='bin_size')
def __init__(self, bin_size=20 * pq.ms, x_dim=3, min_var_frac=0.01,
tau_init=100.0 * pq.ms, eps_init=1.0E-3, em_tol=1.0E-8,
em_max_iters=500, freq_ll=5, verbose=False):
self.bin_size = bin_size
self.x_dim = x_dim
self.min_var_frac = min_var_frac
self.tau_init = tau_init
self.eps_init = eps_init
self.em_tol = em_tol
self.em_max_iters = em_max_iters
self.freq_ll = freq_ll
self.valid_data_names = (
'latent_variable_orth',
'latent_variable',
'Vsm',
'VsmGP',
'y')
self.verbose = verbose
if not isinstance(self.bin_size, pq.Quantity):
raise ValueError("'bin_size' must be of type pq.Quantity")
if not isinstance(self.tau_init, pq.Quantity):
raise ValueError("'tau_init' must be of type pq.Quantity")
# will be updated later
self.params_estimated = dict()
self.fit_info = dict()
self.transform_info = dict()
@property
def binsize(self):
warnings.warn("'binsize' is deprecated; use 'bin_size'")
return self.bin_size
def fit(self, spiketrains):
"""
Fit the model with the given training data.
Parameters
----------
spiketrains : list of list of neo.SpikeTrain
Spike train data to be fit to latent variables.
The outer list corresponds to trials and the inner list corresponds
to the neurons recorded in that trial, such that
`spiketrains[l][n]` is the spike train of neuron `n` in trial `l`.
Note that the number and order of `neo.SpikeTrain` objects per
trial must be fixed such that `spiketrains[l][n]` and
`spiketrains[k][n]` refer to spike trains of the same neuron
for any choices of `l`, `k`, and `n`.
Returns
-------
self : object
Returns the instance itself.
Raises
------
ValueError
If `spiketrains` is an empty list.
If `spiketrains[0][0]` is not a `neo.SpikeTrain`.
If covariance matrix of input spike data is rank deficient.
"""
self._check_training_data(spiketrains)
seqs_train = self._format_training_data(spiketrains)
# Check if training data covariance is full rank
<reponame>ryanhope2/scormcloud-api-v2-client-python
# coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations.
OpenAPI spec version: 2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RegistrationApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def build_registration_launch_link(self, registration_id, launch_link_request, **kwargs):
"""
Get registration launch link.
Returns the link to use to launch this registration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build_registration_launch_link(registration_id, launch_link_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param LaunchLinkRequestSchema launch_link_request: (required)
:return: LaunchLinkSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.build_registration_launch_link_with_http_info(registration_id, launch_link_request, **kwargs)
else:
(data) = self.build_registration_launch_link_with_http_info(registration_id, launch_link_request, **kwargs)
return data
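# Illustrative usage sketch (added for demonstration, not part of the generated
# client); the identifiers below are made up and the request object would be
# configured with the desired launch options elsewhere:
#
#   api = RegistrationApi()
#   request = LaunchLinkRequestSchema()
#   link = api.build_registration_launch_link('my-registration-id', request)
#   print(link)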
def build_registration_launch_link_with_http_info(self, registration_id, launch_link_request, **kwargs):
"""
Get registration launch link.
Returns the link to use to launch this registration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build_registration_launch_link_with_http_info(registration_id, launch_link_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param LaunchLinkRequestSchema launch_link_request: (required)
:return: LaunchLinkSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id', 'launch_link_request']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method build_registration_launch_link" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `build_registration_launch_link`")
# verify the required parameter 'launch_link_request' is set
if ('launch_link_request' not in params) or (params['launch_link_request'] is None):
raise ValueError("Missing the required parameter `launch_link_request` when calling `build_registration_launch_link`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/launchLink'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'launch_link_request' in params:
body_params = params['launch_link_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LaunchLinkSchema',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_new_registration_instance(self, registration_id, **kwargs):
"""
Create a new instance for this registration specified by the registration ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new_registration_instance(registration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_new_registration_instance_with_http_info(registration_id, **kwargs)
else:
(data) = self.create_new_registration_instance_with_http_info(registration_id, **kwargs)
return data
def create_new_registration_instance_with_http_info(self, registration_id, **kwargs):
"""
Create a new instance for this registration specified by the registration ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new_registration_instance_with_http_info(registration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_new_registration_instance" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `create_new_registration_instance`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/instances'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_registration(self, registration, **kwargs):
"""
Create a registration.
This method is used to create a new registration. A registration will contain a few pieces of information such as a learner name, a learner id, and optionally, information about where activity data should be posted (for client consumption), as well as a way to specify simple authentication schemes for posting said data. A registration must be tied to a specific course at creation time. When the created registration is “launched”, the course specified at creation time will be launched.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_registration(registration, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CreateRegistrationSchema registration: (required)
:param int course_version: The version of the course you want to create the registration for. Unless you have a reason for using this you probably do not need to.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_registration_with_http_info(registration, **kwargs)
else:
(data) = self.create_registration_with_http_info(registration, **kwargs)
return data
def create_registration_with_http_info(self, registration, **kwargs):
"""
Create a registration.
This method is used to create a new registration. A registration will contain a few pieces of information such as a learner name, a learner id, and optionally, information about where activity data should be posted (for client consumption), as well as a way to specify simple authentication schemes for posting said data. A registration must be tied to a specific course at creation time. When the created registration is “launched”, the course specified at creation time will be launched.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_registration_with_http_info(registration, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CreateRegistrationSchema registration: (required)
:param int course_version: The version of the course you want to create the registration for. Unless you have a reason for using this you probably do not need to.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration', 'course_version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_registration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration' is set
if ('registration' not in params) or (params['registration'] is None):
raise ValueError("Missing the required parameter `registration` when calling `create_registration`")
collection_formats = {}
resource_path = '/registrations'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'course_version' in params:
query_params['courseVersion'] = params['course_version']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'registration' in params:
<gh_stars>1-10
__name__ = "ammolite"
__author__ = "<NAME>"
__all__ = ["PyMOLObject", "NonexistentObjectError", "ModifiedObjectError"]
import numbers
from functools import wraps
import numpy as np
import biotite.structure as struc
from .convert import convert_to_atom_array, convert_to_chempy_model
from .startup import get_and_set_pymol_instance
def validate(method):
"""
Check if the object name still exists and if the atom count has
been modified.
If this is the case, raise the appropriate exception.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
self._check_existence()
new_atom_count = self._cmd.count_atoms(f"model {self._name}")
if new_atom_count != self._atom_count:
raise ModifiedObjectError(
f"The number of atoms in the object changed "
f"from the original {self._atom_count} atoms "
f" to {new_atom_count} atoms"
)
return method(self, *args, **kwargs)
return wrapper
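# Illustrative note (added for demonstration, not part of the original module):
# `validate` is intended to wrap PyMOLObject methods defined below, e.g.
#
#   @validate
#   def some_method(self, ...):   # hypothetical method name
#       ...
#
# so that each call first re-checks that the underlying PyMOL object still
# exists and still has the original number of atoms.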
class PyMOLObject:
"""
A wrapper around a *PyMOL object* (*PyMOL model*), usually created
by the static :meth:`from_structure()` method.
This class is primarily used to create *PyMOL* selection strings
from boolean masks of an corresponding :class:`AtomArray` or
:class:`AtomArrayStack` via the :meth:`where()`.
Additionally, objects of this class provide wrapper methods for
common *PyMOL* commands (e.g. ``show()`` or ``color()``), that
directly support boolean masks for the ``selection`` parameter.
Instances of this class become invalid, when atoms are added to or
are deleted from the underlying *PyMOL* object.
Calling methods of such an an invalidated object raises an
:exc:`ModifiedObjectError`.
Likewise, calling methods of an object, of which the underlying
*PyMOL* object does not exist anymore, raises an
:exc:`NonexistentObjectError`.
Parameters
----------
name : str
The name of the *PyMOL* object.
pymol_instance : module or SingletonPyMOL or PyMOL, optional
If *PyMOL* is used in library mode, the :class:`PyMOL`
or :class:`SingletonPyMOL` object is given here.
If otherwise *PyMOL* is used in GUI mode, the :mod:`pymol`
module is given.
By default the currently used *PyMOL* instance
(``ammolite.pymol``) is used.
If no *PyMOL* instance is currently running,
*PyMOL* is started in library mode.
delete : bool, optional
If set to true, the underlying *PyMOL* object will be removed
from the *PyMOL* session,
when this object is garbage collected.
Attributes
----------
name : str
The name of the *PyMOL* object.
"""
_object_counter = 0
_color_counter = 0
def __init__(self, name, pymol_instance=None, delete=True):
self._name = name
self._pymol = get_and_set_pymol_instance(pymol_instance)
self._delete = delete
self._cmd = self._pymol.cmd
self._check_existence()
self._atom_count = self._cmd.count_atoms(f"model {self._name}")
def __del__(self):
if self._delete:
try:
# Try to delete this object from PyMOL
# Fails if PyMOL itself is already garbage collected
self._cmd.delete(self._name)
except:
pass
@staticmethod
def from_structure(atoms, name=None, pymol_instance=None, delete=True):
"""
Create a :class:`PyMOLObject` from an :class:`AtomArray` or
:class:`AtomArrayStack` and add it to the *PyMOL* session.
Parameters
----------
atoms : AtomArray or AtomArrayStack
The structure to be converted.
name : str, optional
The name of the newly created *PyMOL* object.
If omitted, a unique name is generated.
pymol_instance : module or SingletonPyMOL or PyMOL, optional
If *PyMOL* is used in library mode, the :class:`PyMOL`
or :class:`SingletonPyMOL` object is given here.
If otherwise *PyMOL* is used in GUI mode, the :mod:`pymol`
module is given.
By default the currently used *PyMOL* instance
(``ammolite.pymol``) is used.
If no *PyMOL* instance is currently running,
*PyMOL* is started in library mode.
delete : bool, optional
If set to true, the underlying *PyMOL* object will be
removed from the *PyMOL* session when this object is
garbage collected.
"""
pymol_instance = get_and_set_pymol_instance(pymol_instance)
cmd = pymol_instance.cmd
if name is None:
name = f"ammolite_obj_{PyMOLObject._object_counter}"
PyMOLObject._object_counter += 1
if isinstance(atoms, struc.AtomArray) or \
(isinstance(atoms, struc.AtomArrayStack) and atoms.stack_depth == 1):
model = convert_to_chempy_model(atoms)
cmd.load_model(model, name)
elif isinstance(atoms, struc.AtomArrayStack):
# Use first model as template
model = convert_to_chempy_model(atoms[0])
cmd.load_model(model, name)
# Append states corresponding to all following models
for coord in atoms.coord[1:]:
cmd.load_coordset(coord, name)
else:
raise TypeError("Expected 'AtomArray' or 'AtomArrayStack'")
return PyMOLObject(name, pymol_instance, delete)
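# Minimal usage sketch (illustrative only; the input structure and the
# variable names are assumptions, not part of this module):
#
#     import biotite.structure.io as strucio
#     atom_array = strucio.load_structure("example.pdb")   # hypothetical file
#     pymol_obj = PyMOLObject.from_structure(atom_array, name="my_structure")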
def to_structure(self, state=None, altloc="first", extra_fields=None,
include_bonds=False):
"""
Convert this object into an :class:`AtomArray` or
:class:`AtomArrayStack`.
The returned :class:`AtomArray` contains the optional annotation
categories ``b_factor``, ``occupancy`` and ``charge``.
Parameters
----------
state : int, optional
If this parameter is given, the function will return an
:class:`AtomArray` corresponding to the given state of the
*PyMOL* object.
If this parameter is omitted, an :class:`AtomArrayStack`
containing all states will be returned, even if the *PyMOL*
object contains only one state.
altloc : {'first', 'occupancy', 'all'}
This parameter defines how *altloc* IDs are handled:
- ``'first'`` - Use atoms that have the first
*altloc* ID appearing in a residue.
- ``'occupancy'`` - Use atoms that have the *altloc* ID
with the highest occupancy for a residue.
- ``'all'`` - Use all atoms.
Note that this leads to duplicate atoms.
When this option is chosen, the ``altloc_id``
annotation array is added to the returned structure.
include_bonds : bool, optional
If set to true, an associated :class:`BondList` will be created
for the returned structure.
Returns
-------
structure : AtomArray or AtomArrayStack
The converted structure.
Whether an :class:`AtomArray` or :class:`AtomArrayStack` is
returned depends on the `state` parameter.
"""
if state is None:
model = self._cmd.get_model(self._name, state=1)
template = convert_to_atom_array(
model, include_bonds
)
expected_length = None
coord = []
for i in range(self._cmd.count_states(self._name)):
state_coord = self._cmd.get_coordset(self._name, state=i+1)
if expected_length is None:
expected_length = len(state_coord)
elif len(state_coord) != expected_length:
raise ValueError(
"The models have different numbers of atoms"
)
coord.append(state_coord)
coord = np.stack(coord)
structure = struc.from_template(template, coord)
else:
model = self._cmd.get_model(self._name, state=state)
structure = convert_to_atom_array(
model, include_bonds
)
# Filter altloc IDs and return
if altloc == "occupancy":
structure = structure[
...,
struc.filter_highest_occupancy_altloc(
structure, structure.altloc_id, structure.occupancy
)
]
structure.del_annotation("altloc_id")
return structure
elif altloc == "first":
structure = structure[
...,
struc.filter_first_altloc(structure, structure.altloc_id)
]
structure.del_annotation("altloc_id")
return structure
elif altloc == "all":
return structure
else:
raise ValueError(f"'{altloc}' is not a valid 'altloc' option")
@property
def name(self):
return self._name
def exists(self):
"""
Check whether the underlying *PyMOL* object still exists.
Returns
-------
bool
True if the *PyMOL* session contains an object with the name
of this :class:`PyMOLObject`, false otherwise.
"""
return self._name in self._cmd.get_object_list()
def _check_existence(self):
if not self.exists():
raise NonexistentObjectError(
f"A PyMOL object with the name {self._name} "
f"does not exist anymore"
)
@validate
def where(self, index):
"""
Convert a *Biotite*-compatible atom selection index
(integer, slice, boolean mask, index array) into a *PyMOL*
selection expression.
Parameters
----------
index : int or slice or ndarray, dtype=bool or ndarray, dtype=int
The boolean mask to be converted into a selection string.
Returns
-------
expression : str
A *PyMOL* compatible selection expression.
"""
if isinstance(index, numbers.Integral):
# PyMOL's indexing starts at 1
return f"model {self._name} and index {index + 1}"
elif isinstance(index, np.ndarray) and index.dtype == bool:
mask = index
if len(mask) != self._atom_count:
raise IndexError(
f"Mask has length {len(mask)}, but the number of "
f"atoms in the PyMOL model is {self._atom_count}"
)
else:
# Convert any other index type into a boolean mask
mask = np.zeros(self._atom_count, dtype=bool)
mask[index] = True
# Indices where the mask changes from True to False
# or from False to True
# The '+1' makes each index refer to the position
# after the change i.e. the new value
changes = np.where(np.diff(mask))[0] + 1
# If first element is True, insert index 0 at start
# -> the first change is always from False to True
if mask[0]:
changes = np.concatenate(([0], changes))
# If the last element is True, append the length of the mask
# as exclusive stop index
# -> the last change is always from True to False
if mask[-1]:
changes = np.concatenate((changes, [len(mask)]))
# -> Changes are alternating (F->T, T->F, F->T, ..., F->T, T->F)
# Reshape into pairs ([F->T, T->F], [F->T, T->F], ...)
# -> these are the intervals where the mask is True
intervals = np.reshape(changes, (-1, 2))
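# Worked example (assumed mask, for illustration only):
#   mask      = [F, T, T, F, T]                      (atom count 5)
#   np.where(np.diff(mask))[0] + 1 -> [1, 3, 4]
#   mask[0] is False -> nothing prepended; mask[-1] is True -> append 5
#   changes   = [1, 3, 4, 5]
#   intervals = [[1, 3], [4, 5]]
#   index_selection -> "index 2-3 or index 5-5"      (PyMOL indices are 1-based)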
if len(intervals) > 0:
# Convert interval into selection string
# Two things to note:
# - PyMOL's indexing starts at 1 -> 'start+1'
# - Stop in 'intervals' is exclusive -> 'stop+1-1' -> 'stop'
index_selection = " or ".join(
[f"index {start+1}-{stop}" for start, stop in intervals]
)
# Constrain the selection to given object name
return f"model {self._name} and ({index_selection})"
else:
| |
operation code
if len(writeByteList) == 0:
return self.Permute6309WriteLayouts(startAsm, layoutDict, writeByteList, 1)
# try out all possible permutations of byte writes
bFoundCandidates = False
for writeIdx in range(writeListLen):
writeByteCmd = writeByteList[writeIdx]
layoutDict[1].append(writeByteCmd)
newWriteByteList = writeByteList[:writeIdx] + writeByteList[writeIdx+1:]
# recurse down into this case
trialAsm = self.Permute6309WriteLayouts(copy.deepcopy(startAsm), layoutDict, newWriteByteList, 1)
# revert our layoutDict changes
del layoutDict[1][-1]
# update our bestAsm case
if not bFoundCandidates:
bestAsm = trialAsm # since this is our first candidate, it is currently the best
bFoundCandidates = True
else:
bestAsm = self.BestResult(bestAsm, trialAsm)
continue
# we are completely done; return the best result
return bestAsm
# *************************************************************************************************
# Sprite class: Draw function row generation for 6809
# *************************************************************************************************
def RowDraw6809(self, y, regState, byteStrips):
# iterate through all permutations of byte/word layouts for strips to find fastest one
layoutList = [ ]
return self.PermuteByteStripLayouts(y, regState, layoutList, byteStrips)
def PermuteByteStripLayouts(self, rowNum, regState, layoutList, remainingCmdStrips):
# if we are at a leaf, then we have a complete row layout to turn into assembly code
if len(remainingCmdStrips) == 0:
rowAsm = self.GenRowCode(rowNum, copy.deepcopy(regState), layoutList)
return rowAsm
# otherwise we have more pixel strips to permute, so we will recurse
activeByteCmdStrip = remainingCmdStrips[0]
nextByteCmdStrips = remainingCmdStrips[1:]
# if there is only one layout choice, then recurse into trivial case
bytesInStrip = len(activeByteCmdStrip[1])
if bytesInStrip == 1 or (bytesInStrip & 1) == 0:
layoutList.append((activeByteCmdStrip, None))
bestAsm = self.PermuteByteStripLayouts(rowNum, regState, layoutList, nextByteCmdStrips)
layoutList.pop()
return bestAsm
# otherwise, iterate through each possible position for single byte in odd-length strip
bestAsm = None
for idx in range(0, bytesInStrip, 2):
singleByteOffX = activeByteCmdStrip[0] + idx
layoutList.append((activeByteCmdStrip, singleByteOffX))
trialAsm = self.PermuteByteStripLayouts(rowNum, regState, layoutList, nextByteCmdStrips)
layoutList.pop()
if bestAsm is None or trialAsm.metrics.cycles < bestAsm.metrics.cycles:
#if bestAsm != None: # fixme debug
# print("%s: row layout with (cyc=%i,bytes=%i) is better than (cyc=%i,bytes=%i)" % (self.name, trialAsm.metrics.cycles, trialAsm.metrics.bytes, bestAsm.metrics.cycles, bestAsm.metrics.bytes))
bestAsm = trialAsm
# return the best one
return bestAsm
def GenRowCode(self, rowNum, regState, layoutList):
rowAsm = AsmStream(None, regState)
# generate cmdBytesToStore and cmdWordsToStore lists
cmdBytesToStore = []
cmdWordsToStore = []
cmdBytesToWrite = []
cmdWordsToWrite = []
offY = self.YPtrOffNew
for ((stripOffX,stripByteCmds), singleByteOffX) in layoutList:
numByteCmds = len(stripByteCmds)
offX = stripOffX
while offX < stripOffX+numByteCmds:
numLeft = stripOffX+numByteCmds - offX
# do we process a single byte?
if numLeft == 1 or singleByteOffX == offX:
byteCmd = stripByteCmds[offX-stripOffX]
cmdBytesToStore.append((offX, offY, byteCmd))
offX += 1
offY += 1
continue
# otherwise we must process a word
byteCmd1 = stripByteCmds[offX-stripOffX]
byteCmd2 = stripByteCmds[offX-stripOffX+1]
cmdWordsToStore.append((offX, offY, byteCmd1, byteCmd2))
offX += 2
offY += 2
# use U to store all words with no command2 bytes
idx = 0
while idx < len(cmdWordsToStore):
(offX,offY,byteCmd1,byteCmd2) = cmdWordsToStore[idx]
if byteCmd1[0] != 2 and byteCmd2[0] != 2:
# emit code to store the background word with U
rowAsm.gen_loadstore_indexed(True, regU, regX, offX + 256*self.lineAdvance, "") # ldu off,x
rowAsm.gen_loadstore_indexed(False, regU, regY, offY, "")
# move this command word into the cmdWordsToWrite or cmdBytesToWrite list
if byteCmd1[0] == 1:
cmdWordsToStore.pop(idx)
cmdBytesToWrite.append((offX+1,offY+1,byteCmd2))
elif byteCmd2[0] == 1:
cmdWordsToStore.pop(idx)
cmdBytesToWrite.append((offX,offY,byteCmd1))
else:
cmdWordsToWrite.append(cmdWordsToStore.pop(idx))
continue
# we can't save this word right now, so skip it
idx += 1
# write all of the pure Command3 words which match D (if it's valid)
if rowAsm.reg.IsValid(regD):
idx = 0
while idx < len(cmdWordsToWrite):
(offX,offY,byteCmd1,byteCmd2) = cmdWordsToWrite[idx]
if byteCmd1[1] == rowAsm.reg.GetValue(regA) and byteCmd2[1] == rowAsm.reg.GetValue(regB):
rowAsm.gen_loadstore_indexed(False, regD, regX, offX + 256*self.lineAdvance, "") # std off,x
cmdWordsToWrite.pop(idx)
continue
idx += 1
# decide which byte register (A or B) to use as scratch.
# preserve either register (A or B) which matches a Command-3 byte
if not rowAsm.reg.IsValid(regA):
scratchReg = regA
elif not rowAsm.reg.IsValid(regB):
scratchReg = regB
else:
scratchReg = regB
for (offX,offY,(byteCmdNum,byteCmdVal,byteCmdMask)) in cmdBytesToStore + cmdBytesToWrite:
if byteCmdNum != 3:
continue
if byteCmdVal == rowAsm.reg.GetValue(regB):
scratchReg = regA
# use the scratch register to store all bytes, and write all Command2 bytes
while len(cmdBytesToStore) > 0:
(offX,offY,byteCmd) = cmdBytesToStore.pop(0)
rowAsm.gen_loadstore_indexed(True, scratchReg, regX, offX + 256*self.lineAdvance, "")
rowAsm.gen_loadstore_indexed(False, scratchReg, regY, offY, "")
if byteCmd[0] == 2:
# we don't need to clear bits with AND mask if nybble we're writing is 15
if (byteCmd[1] | byteCmd[2]) != 0xff:
rowAsm.emit_op(f"and{regName[scratchReg]}", (f"#${byteCmd[2]:02x}"), "", 2, 2, 2)
if byteCmd[1] != 0:
rowAsm.emit_op(f"or{regName[scratchReg]}", (f"#${byteCmd[1]:02x}"), "", 2, 2, 2)
rowAsm.gen_loadstore_indexed(False, scratchReg, regX, offX + 256*self.lineAdvance, "")
elif byteCmd[0] == 3:
cmdBytesToWrite.append((offX,offY,byteCmd))
# write all Command3 bytes which match a valid register
idx = 0
while idx < len(cmdBytesToWrite):
(offX,offY,byteCmd) = cmdBytesToWrite[idx]
if byteCmd[0] != 3:
raise Exception(f"Error: byte command {int(byteCmd[0])} is in the cmdBytesToWrite list!")
if rowAsm.reg.IsValid(regA) and byteCmd[1] == rowAsm.reg.GetValue(regA):
rowAsm.gen_loadstore_indexed(False, regA, regX, offX + 256*self.lineAdvance, "") # sta off,x
cmdBytesToWrite.pop(idx)
continue
elif rowAsm.reg.IsValid(regB) and byteCmd[1] == rowAsm.reg.GetValue(regB):
rowAsm.gen_loadstore_indexed(False, regB, regX, offX + 256*self.lineAdvance, "") # stb off,x
cmdBytesToWrite.pop(idx)
continue
idx += 1
# fixme (micro-op): if there is a word in cmdWordsToStore which contains a command-3 byte which
# is part of the future lonelyBytes list, then put that word at end of list
# use D (trashing it) to Store and Write the remaining unstored words (which all must have a Command2 byte)
while len(cmdWordsToStore) > 0:
(offX,offY,byteCmd1,byteCmd2) = cmdWordsToStore.pop(0)
rowAsm.gen_loadstore_indexed(True, regD, regX, offX + 256*self.lineAdvance, "") # ldd off,x
rowAsm.gen_loadstore_indexed(False, regD, regY, offY, "")
if byteCmd1[0] == 2 and byteCmd2[0] == 2:
byteSplit = False
# we don't need to clear bits with AND mask if nybble we're writing is 15
if (byteCmd1[1] | byteCmd1[2]) != 0xff:
rowAsm.emit_op("anda", (f"#${byteCmd1[2]:02x}"), "", 2, 2, 2)
else:
byteSplit = True
if (byteCmd2[1] | byteCmd2[2]) != 0xff:
rowAsm.emit_op("andb", (f"#${byteCmd2[2]:02x}"), "", 2, 2, 2)
else:
byteSplit = True
if byteSplit:
# we don't need to write nybble with OR if we're writing 0
if byteCmd1[1] != 0:
rowAsm.emit_op("ora", (f"#${byteCmd1[1]:02x}"), "", 2, 2, 2)
if byteCmd2[1] != 0:
rowAsm.emit_op("orb", (f"#${byteCmd2[1]:02x}"), "", 2, 2, 2)
else:
wordAdd = (byteCmd1[1] << 8) + byteCmd2[1]
if wordAdd != 0:
rowAsm.emit_op("addd", f"#${wordAdd:04x}", "", 4, 3, 3)
rowAsm.gen_loadstore_indexed(False, regD, regX, offX + 256*self.lineAdvance, "") # std off,x
elif byteCmd1[0] == 2:
# we don't need to clear bits with AND mask if nybble we're writing is 15
if (byteCmd1[1] | byteCmd1[2]) != 0xff:
rowAsm.emit_op("anda", (f"#${byteCmd1[2]:02x}"), "", 2, 2, 2)
# we don't need to write nybble with OR if we're writing 0
if byteCmd1[1] != 0:
rowAsm.emit_op("ora", (f"#${byteCmd1[1]:02x}"), "", 2, 2, 2)
if byteCmd2[0] == 1:
rowAsm.gen_loadstore_indexed(False, regA, regX, offX + 256*self.lineAdvance, "") # sta off,x
else: # assert: byteCmd2[0] == 3
rowAsm.gen_loadimm_accum(regB, byteCmd2[1], "")
rowAsm.gen_loadstore_indexed(False, regD, regX, offX + 256*self.lineAdvance, "") # std off,x
elif byteCmd2[0] == 2:
# we don't need to clear bits with AND mask if nybble we're writing is 15
if (byteCmd2[1] | byteCmd2[2]) != 0xff:
rowAsm.emit_op("andb", (f"#${byteCmd2[2]:02x}"), "", 2, 2, 2)
# we don't need to write nybble with OR if we're writing 0
if byteCmd2[1] != 0:
rowAsm.emit_op("orb", (f"#${byteCmd2[1]:02x}"), "", 2, 2, 2)
if byteCmd1[0] == 1:
rowAsm.gen_loadstore_indexed(False, regB, regX, offX+1 + 256*self.lineAdvance, "") # stb off,x
else: # assert: byteCmd1[0] == 3
rowAsm.gen_loadimm_accum(regA, byteCmd1[1], "")
rowAsm.gen_loadstore_indexed(False, regD, regX, offX + 256*self.lineAdvance, "") # std off,x
else:
raise Exception("Error: word in cmdWordsToStore contains no Command-2 bytes!")
# assert that only command3 bytes/words are remaining to be written, and all bytes/words have been stored
if len(cmdBytesToStore) > 0 or len(cmdWordsToStore) > 0:
raise Exception("internal error: unstored bytes/words remaining!");
for (offX,offY,byteCmd) in cmdBytesToWrite:
if byteCmd[0] != 3:
raise Exception(f"internal error: command-{int(byteCmd[0])} byte to write")
for (offX,offY,byteCmd1,byteCmd2) in cmdWordsToWrite:
if byteCmd1[0] != 3 or byteCmd2[0] != 3:
raise Exception(f"internal error: command-({int(byteCmd1[0])},{int(byteCmd2[0])}) bytes to write in word")
# emit byte writes for any bytes which match our current register values
if rowAsm.reg.IsValid(regA) or rowAsm.reg.IsValid(regB):
idx = 0
while idx < len(cmdBytesToWrite):
(offX,offY,byteCmd) = cmdBytesToWrite[idx]
matchReg = None
if rowAsm.reg.IsValid(regA) and byteCmd[1] == rowAsm.reg.GetValue(regA):
matchReg = regA
elif rowAsm.reg.IsValid(regB) and byteCmd[1] == rowAsm.reg.GetValue(regB):
matchReg = regB
if matchReg is None:
idx += 1
continue
# we found a | |
<filename>tests/tests/test_filetransfer.py
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import io
import os
import os.path
import random
import requests
import shutil
import tempfile
import pytest
import time
import uuid
import urllib.parse
from tempfile import NamedTemporaryFile
from ..common_setup import standard_setup_one_client, enterprise_no_client
from ..MenderAPI import (
authentication,
devauth,
get_container_manager,
reset_mender_api,
DeviceAuthV2,
logger,
)
from .common_connect import wait_for_connect
from .common import md5sum
from .mendertesting import MenderTesting
from testutils.infra.container_manager import factory
from testutils.infra.device import MenderDevice
from testutils.common import User, update_tenant, new_tenant_client
from testutils.infra.cli import CliTenantadm
container_factory = factory.get_factory()
connect_service_name = "mender-connect"
def download_file(path, devid, authtoken):
deviceconnect_url = (
"https://%s/api/management/v1/deviceconnect"
% get_container_manager().get_mender_gateway()
)
download_url = "%s/devices/%s/download" % (deviceconnect_url, devid)
download_url_with_path = download_url + "?path=" + urllib.parse.quote(path)
return requests.get(download_url_with_path, verify=False, headers=authtoken)
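# For illustration (hostname, device ID and path are assumed values): with a
# gateway of "mender.example.com", devid "1234" and path
# "/etc/mender/mender.conf", the request URL becomes
#   https://mender.example.com/api/management/v1/deviceconnect/devices/1234/download?path=/etc/mender/mender.conf
# (urllib.parse.quote leaves "/" unescaped by default; other special
# characters in the path would be percent-encoded).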
def upload_file(path, file, devid, authtoken, mode="600", uid="0", gid="0"):
files = (
("path", (None, path)),
("mode", (None, mode)),
("uid", (None, uid)),
("gid", (None, gid)),
("file", (os.path.basename(path), file, "application/octet-stream"),),
)
deviceconnect_url = (
"https://%s/api/management/v1/deviceconnect"
% get_container_manager().get_mender_gateway()
)
upload_url = "%s/devices/%s/upload" % (deviceconnect_url, devid)
return requests.put(upload_url, verify=False, headers=authtoken, files=files)
def set_limits(mender_device, limits, auth, devid):
tmpdir = tempfile.mkdtemp()
try:
# retrieve the original configuration file
output = mender_device.run("cat /etc/mender/mender-connect.conf")
config = json.loads(output)
# update mender-connect.conf setting the file transfer limits
config["Limits"] = limits
mender_connect_conf = os.path.join(tmpdir, "mender-connect.conf")
with open(mender_connect_conf, "w") as fd:
json.dump(config, fd)
mender_device.run(
"cp /etc/mender/mender-connect.conf /etc/mender/mender-connect.conf-backup-`ls /etc/mender/mender-connect.* | wc -l`"
)
mender_device.put(
os.path.basename(mender_connect_conf),
local_path=os.path.dirname(mender_connect_conf),
remote_path="/etc/mender",
)
finally:
shutil.rmtree(tmpdir)
mender_device.run("systemctl restart %s" % connect_service_name)
wait_for_connect(auth, devid)
debugoutput = mender_device.run("cat /etc/mender/mender-connect.conf")
logger.info("/etc/mender/mender-connect.conf:\n%s" % debugoutput)
debugoutput = mender_device.run("ls -al /etc/mender")
logger.info("ls -al /etc/mender/:\n%s" % debugoutput)
class _TestFileTransferBase(MenderTesting):
def test_filetransfer(
self, devid, authtoken, path="/etc/mender/mender.conf", content_assertion=None
):
# download a file and check its content
r = download_file(path, devid, authtoken)
assert r.status_code == 200, r.json()
if content_assertion:
assert content_assertion in str(r.content)
assert (
r.headers.get("Content-Disposition")
== 'attachment; filename="' + os.path.basename(path) + '"'
)
assert r.headers.get("Content-Type") == "application/octet-stream"
assert r.headers.get("X-Men-File-Gid") == "0"
assert r.headers.get("X-Men-File-Uid") == "0"
assert r.headers.get("X-Men-File-Mode") == "600"
assert r.headers.get("X-Men-File-Path") == "/etc/mender/mender.conf"
assert r.headers.get("X-Men-File-Size") != ""
# wrong request, path is relative
path = "relative/path"
r = download_file(path, devid, authtoken)
assert r.status_code == 400, r.json()
assert r.json().get("error") == "bad request: path: must be absolute."
# wrong request, no such file or directory
path = "/does/not/exist"
r = download_file(path, devid, authtoken)
assert r.status_code == 400, r.json()
assert "/does/not/exist: no such file or directory" in r.json().get("error")
try:
# create a 40MB random file
f = NamedTemporaryFile(delete=False)
for i in range(40 * 1024):
f.write(os.urandom(1024))
f.close()
# random uid and gid
uid = random.randint(100, 200)
gid = random.randint(100, 200)
# upload the file
r = upload_file(
"/tmp/random.bin",
open(f.name, "rb"),
devid,
authtoken,
mode="600",
uid=str(uid),
gid=str(gid),
)
assert r.status_code == 201, r.json()
# download the file
path = "/tmp/random.bin"
r = download_file(path, devid, authtoken)
assert r.status_code == 200, r.json()
assert (
r.headers.get("Content-Disposition")
== 'attachment; filename="random.bin"'
)
assert r.headers.get("Content-Type") == "application/octet-stream"
assert r.headers.get("X-Men-File-Mode") == "600"
assert r.headers.get("X-Men-File-Uid") == str(uid)
assert r.headers.get("X-Men-File-Gid") == str(gid)
assert r.headers.get("X-Men-File-Path") == "/tmp/random.bin"
assert r.headers.get("X-Men-File-Size") == str(40 * 1024 * 1024)
filename_download = f.name + ".download"
with open(filename_download, "wb") as fw:
fw.write(r.content)
# verify the file is not corrupted
assert md5sum(filename_download) == md5sum(f.name)
finally:
os.unlink(f.name)
if os.path.isfile(f.name + ".download"):
os.unlink(f.name + ".download")
# wrong request, path is relative
r = upload_file(
"relative/path/dummy.txt",
io.StringIO("dummy"),
devid,
authtoken,
mode="600",
uid="0",
gid="0",
)
assert r.status_code == 400, r.json()
assert r.json().get("error") == "bad request: path: must be absolute."
# wrong request, cannot write the file
r = upload_file(
"/does/not/exist/dummy.txt",
io.StringIO("dummy"),
devid,
authtoken,
mode="600",
uid="0",
gid="0",
)
assert r.status_code == 400, r.json()
assert "failed to create target file" in r.json().get("error")
def test_filetransfer_limits_upload(self, mender_device, devid, auth):
"""Tests the file transfer features with limits"""
authtoken = auth.get_auth_token()
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {"Chroot": "/var/lib/mender/filetransfer"},
},
auth,
devid,
)
mender_device.run("mkdir -p /var/lib/mender/filetransfer")
logger.info(
"-- testcase: File Transfer limits: file outside chroot; upload forbidden"
)
f = NamedTemporaryFile(delete=False)
for i in range(40 * 1024):
f.write(os.urandom(1024))
f.close()
r = upload_file("/usr/random.bin", open(f.name, "rb"), devid, authtoken,)
assert r.status_code == 400, r.json()
assert (
r.json().get("error")
== "access denied: the target file path is outside chroot"
)
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {
"Chroot": "/var/lib/mender/filetransfer",
"FollowSymLinks": True, # in the image /var/lib/mender is a symlink
},
},
auth,
devid,
)
logger.info(
"-- testcase: File Transfer limits: file inside chroot; upload allowed"
)
f = NamedTemporaryFile(delete=False)
f.write(os.urandom(16))
f.close()
# upload a file
r = upload_file(
"/var/lib/mender/filetransfer/random.bin",
open(f.name, "rb"),
devid,
authtoken,
)
assert r.status_code == 201
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {"MaxFileSize": 16378, "FollowSymLinks": True},
},
auth,
devid,
)
logger.info(
"-- testcase: File Transfer limits: file size over the limit; upload forbidden"
)
f = NamedTemporaryFile(delete=False)
for i in range(128 * 1024):
f.write(b"ok")
f.close()
r = upload_file("/tmp/random.bin", open(f.name, "rb"), devid, authtoken,)
assert r.status_code == 400, r.json()
assert (
r.json().get("error")
== "failed to write file chunk: transmitted bytes limit exhausted"
)
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {
"FollowSymLinks": True,
"Counters": {"MaxBytesRxPerMinute": 16784},
},
},
auth,
devid,
)
logger.info(
"-- testcase: File Transfer limits: transfers during last minute over the limit; upload forbidden"
)
f = NamedTemporaryFile(delete=False)
for i in range(128 * 1024):
f.write(b"ok")
f.close()
upload_file(
"/tmp/random-0.bin", open(f.name, "rb"), devid, authtoken,
)
upload_file(
"/tmp/random-1.bin", open(f.name, "rb"), devid, authtoken,
)
logger.info("-- testcase: File Transfer limits: sleeping to gather the avg")
time.sleep(32) # wait for mender-connect to calculate the 1m exp moving avg
mender_device.run(
"kill -USR1 `pidof mender-connect`"
) # USR1 makes mender-connect print status
r = upload_file("/tmp/random-2.bin", open(f.name, "rb"), devid, authtoken,)
assert r.status_code == 400, r.json()
assert r.json().get("error") == "transmitted bytes limit exhausted"
logger.info(
"-- testcase: File Transfer limits: transfers during last minute: test_filetransfer_limits_upload sleeping 64s to be able to transfer again"
)
# let's rest some more and increase the limit and try again
time.sleep(64)
mender_device.run(
"kill -USR1 `pidof mender-connect`"
) # USR1 makes mender-connect print status
logger.info(
"-- testcase: File Transfer limits: transfers during last minute below the limit; upload allowed"
)
f = NamedTemporaryFile(delete=False)
for i in range(64):
f.write(b"ok")
f.close()
# upload a file
r = upload_file("/tmp/random-a.bin", open(f.name, "rb"), devid, authtoken,)
mender_device.run(
"kill -USR1 `pidof mender-connect`"
) # USR1 makes mender-connect print status
assert r.status_code == 201
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {
"Chroot": "/var/lib/mender/filetransfer",
"FollowSymLinks": True, # in the image /var/lib/mender is a symlink
"PreserveMode": True,
},
},
auth,
devid,
)
logger.info("-- testcase: File Transfer limits: preserve modes;")
f = NamedTemporaryFile(delete=False)
f.write(os.urandom(16))
f.close()
r = upload_file(
"/var/lib/mender/filetransfer/modes.bin",
open(f.name, "rb"),
devid,
authtoken,
mode="4711",
)
modes_ls = mender_device.run("ls -al /var/lib/mender/filetransfer/modes.bin")
logger.info(
"test_filetransfer_limits_upload ls -al /var/lib/mender/filetransfer/modes.bin:\n%s"
% modes_ls
)
assert modes_ls.startswith("-rws--x--x")
assert r.status_code == 201
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {
"Chroot": "/var/lib/mender/filetransfer",
"FollowSymLinks": True, # in the image /var/lib/mender is a symlink
"PreserveOwner": True,
"PreserveGroup": True,
},
},
auth,
devid,
)
logger.info("-- testcase: File Transfer limits: preserve owner and group;")
f = NamedTemporaryFile(delete=False)
f.write(os.urandom(16))
f.close()
gid = int(mender_device.run("cat /etc/group | tail -1| cut -f3 -d:"))
uid = int(mender_device.run("cat /etc/passwd | tail -1| cut -f3 -d:"))
logger.info("test_filetransfer_limits_upload gid/uid %d/%d", gid, uid)
r = upload_file(
"/var/lib/mender/filetransfer/ownergroup.bin",
open(f.name, "rb"),
devid,
authtoken,
uid=str(uid),
gid=str(gid),
)
owner_group = mender_device.run(
"ls -aln /var/lib/mender/filetransfer/ownergroup.bin | cut -f 3,4 -d' '"
)
assert owner_group == str(uid) + " " + str(gid) + "\n"
assert r.status_code == 201
def test_filetransfer_limits_download(self, mender_device, devid, auth):
not_implemented_error = False
def assert_forbidden(rsp, message):
nonlocal not_implemented_error
try:
assert rsp.status_code == 403
assert rsp.json().get("error") == message
except AssertionError as e:
if rsp.status_code == 500:
# Expected (current) behavior
not_implemented_error = True
else:
raise e
"""Tests the file transfer features with limits"""
authtoken = auth.get_auth_token()
set_limits(
mender_device,
{
"Enabled": True,
"FileTransfer": {"Chroot": "/var/lib/mender/filetransfer"},
},
auth,
devid,
)
path = "/etc/profile"
r = download_file(path, devid, authtoken)
logger.info(
"-- testcase: File Transfer limits: file outside chroot; download forbidden"
| |
ACUTE
'\u0152': b'\xea', # LATIN CAPITAL LIGATURE OE
'\u0153': b'\xfa', # LATIN SMALL LIGATURE OE
'\u0154': b'\xc2R', # LATIN CAPITAL LETTER R WITH ACUTE
'\u0155': b'\xc2r', # LATIN SMALL LETTER R WITH ACUTE
'\u0156': b'\xd0R', # LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157': b'\xd0r', # LATIN SMALL LETTER R WITH CEDILLA
'\u0158': b'\xcfR', # LATIN CAPITAL LETTER R WITH CARON
'\u0159': b'\xcfr', # LATIN SMALL LETTER R WITH CARON
'\u015a': b'\xc2S', # LATIN CAPITAL LETTER S WITH ACUTE
'\u015b': b'\xc2s', # LATIN SMALL LETTER S WITH ACUTE
'\u015c': b'\xc3S', # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
'\u015d': b'\xc3s', # LATIN SMALL LETTER S WITH CIRCUMFLEX
'\u015e': b'\xd0S', # LATIN CAPITAL LETTER S WITH CEDILLA
'\u015f': b'\xd0s', # LATIN SMALL LETTER S WITH CEDILLA
'\u0160': b'\xcfS', # LATIN CAPITAL LETTER S WITH CARON
'\u0161': b'\xcfs', # LATIN SMALL LETTER S WITH CARON
'\u0162': b'\xd0T', # LATIN CAPITAL LETTER T WITH CEDILLA
'\u0163': b'\xd0t', # LATIN SMALL LETTER T WITH CEDILLA
'\u0164': b'\xcfT', # LATIN CAPITAL LETTER T WITH CARON
'\u0165': b'\xcft', # LATIN SMALL LETTER T WITH CARON
'\u0168': b'\xc4U', # LATIN CAPITAL LETTER U WITH TILDE
'\u0169': b'\xc4u', # LATIN SMALL LETTER U WITH TILDE
'\u016a': b'\xc5U', # LATIN CAPITAL LETTER U WITH MACRON
'\u016b': b'\xc5u', # LATIN SMALL LETTER U WITH MACRON
'\u016c': b'\xc6U', # LATIN CAPITAL LETTER U WITH BREVE
'\u016d': b'\xc6u', # LATIN SMALL LETTER U WITH BREVE
'\u016e': b'\xcaU', # LATIN CAPITAL LETTER U WITH RING ABOVE
'\u016f': b'\xcau', # LATIN SMALL LETTER U WITH RING ABOVE
'\u0170': b'\xcdU', # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171': b'\xcdu', # LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172': b'\xd3U', # LATIN CAPITAL LETTER U WITH OGONEK
'\u0173': b'\xd3u', # LATIN SMALL LETTER U WITH OGONEK
'\u0174': b'\xc3W', # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
'\u0175': b'\xc3w', # LATIN SMALL LETTER W WITH CIRCUMFLEX
'\u0176': b'\xc3Y', # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
'\u0177': b'\xc3y', # LATIN SMALL LETTER Y WITH CIRCUMFLEX
'\u0178': b'\xc8Y', # LATIN CAPITAL LETTER Y WITH DIAERESIS
'\u0179': b'\xc2Z', # LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a': b'\xc2z', # LATIN SMALL LETTER Z WITH ACUTE
'\u017b': b'\xc7Z', # LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c': b'\xc7z', # LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017d': b'\xcfZ', # LATIN CAPITAL LETTER Z WITH CARON
'\u017e': b'\xcfz', # LATIN SMALL LETTER Z WITH CARON
'\u01a0': b'\xceO', # LATIN CAPITAL LETTER O WITH HORN
'\u01a1': b'\xceo', # LATIN SMALL LETTER O WITH HORN
'\u01af': b'\xceU', # LATIN CAPITAL LETTER U WITH HORN
'\u01b0': b'\xceu', # LATIN SMALL LETTER U WITH HORN
'\u01cd': b'\xcfA', # LATIN CAPITAL LETTER A WITH CARON
'\u01ce': b'\xcfa', # LATIN SMALL LETTER A WITH CARON
'\u01cf': b'\xcfI', # LATIN CAPITAL LETTER I WITH CARON
'\u01d0': b'\xcfi', # LATIN SMALL LETTER I WITH CARON
'\u01d1': b'\xcfO', # LATIN CAPITAL LETTER O WITH CARON
'\u01d2': b'\xcfo', # LATIN SMALL LETTER O WITH CARON
'\u01d3': b'\xcfU', # LATIN CAPITAL LETTER U WITH CARON
'\u01d4': b'\xcfu', # LATIN SMALL LETTER U WITH CARON
'\u01d5': b'\xc5\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
'\u01d6': b'\xc5\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND MACRON
'\u01d7': b'\xc2\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
'\u01d8': b'\xc2\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE
'\u01d9': b'\xcf\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
'\u01da': b'\xcf\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND CARON
'\u01db': b'\xc1\xc8U', # LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
'\u01dc': b'\xc1\xc8u', # LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE
'\u01de': b'\xc5\xc8A', # LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
'\u01df': b'\xc5\xc8a', # LATIN SMALL LETTER A WITH DIAERESIS AND MACRON
'\u01e0': b'\xc5\xc7A', # LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
'\u01e1': b'\xc5\xc7a', # LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON
'\u01e2': b'\xc5\xe1', # LATIN CAPITAL LETTER AE WITH MACRON
'\u01e3': b'\xc5\xf1', # LATIN SMALL LETTER AE WITH MACRON
'\u01e6': b'\xcfG', # LATIN CAPITAL LETTER G WITH CARON
'\u01e7': b'\xcfg', # LATIN SMALL LETTER G WITH CARON
'\u01e8': b'\xcfK', # LATIN CAPITAL LETTER K WITH CARON
'\u01e9': b'\xcfk', # LATIN SMALL LETTER K WITH CARON
'\u01ea': b'\xd3O', # LATIN CAPITAL LETTER O WITH OGONEK
'\u01eb': b'\xd3o', # LATIN SMALL LETTER O WITH OGONEK
'\u01ec': b'\xc5\xd3O', # LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
'\u01ed': b'\xc5\xd3o', # LATIN SMALL LETTER O WITH OGONEK AND MACRON
'\u01f0': b'\xcfj', # LATIN SMALL LETTER J WITH CARON
'\u01f4': b'\xc2G', # LATIN CAPITAL LETTER G WITH ACUTE
'\u01f5': b'\xc2g', # LATIN SMALL LETTER G WITH ACUTE
'\u01f8': b'\xc1N', # LATIN CAPITAL LETTER N WITH GRAVE
'\u01f9': b'\xc1n', # LATIN SMALL LETTER N WITH GRAVE
'\u01fa': b'\xc2\xcaA', # LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
'\u01fb': b'\xc2\xcaa', # LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
'\u01fc': b'\xc2\xe1', # LATIN CAPITAL LETTER AE WITH ACUTE
'\u01fd': b'\xc2\xf1', # LATIN SMALL LETTER AE WITH ACUTE
'\u01fe': b'\xc2\xe9', # LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
'\u01ff': b'\xc2\xf9', # LATIN SMALL LETTER O WITH STROKE AND ACUTE
'\u0218': b'\xd2S', # LATIN CAPITAL LETTER S WITH COMMA BELOW
'\u0219': b'\xd2s', # LATIN SMALL LETTER S WITH COMMA BELOW
'\u021a': b'\xd2T', # LATIN CAPITAL LETTER T WITH COMMA BELOW
'\u021b': b'\xd2t', # LATIN SMALL LETTER T WITH COMMA BELOW
'\u021e': b'\xcfH', # LATIN CAPITAL LETTER H WITH CARON
'\u021f': b'\xcfh', # LATIN SMALL LETTER H WITH CARON
'\u0226': b'\xc7A', # LATIN CAPITAL LETTER A WITH DOT ABOVE
'\u0227': b'\xc7a', # LATIN SMALL LETTER A WITH DOT ABOVE
'\u0228': b'\xd0E', # LATIN CAPITAL LETTER E WITH CEDILLA
'\u0229': b'\xd0e', # LATIN SMALL LETTER E WITH CEDILLA
'\u022a': b'\xc5\xc8O', # LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
'\u022b': b'\xc5\xc8o', # LATIN SMALL LETTER O WITH DIAERESIS AND MACRON
'\u022c': b'\xc5\xc4O', # LATIN CAPITAL LETTER O WITH TILDE AND MACRON
'\u022d': b'\xc5\xc4o', # LATIN SMALL LETTER O WITH TILDE AND MACRON
'\u022e': b'\xc7O', # LATIN CAPITAL LETTER O WITH DOT ABOVE
'\u022f': b'\xc7o', # LATIN SMALL LETTER O WITH DOT ABOVE
'\u0230': b'\xc5\xc7O', # LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
'\u0231': b'\xc5\xc7o', # LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON
'\u0232': b'\xc5Y', # LATIN CAPITAL LETTER Y WITH MACRON
'\u0233': b'\xc5y', # LATIN SMALL LETTER Y WITH MACRON
'\u02b9': b'\xbd', # MODIFIER LETTER PRIME
'\u02ba': b'\xbe', # MODIFIER LETTER DOUBLE PRIME
'\u02bb': b'\xb0', # MODIFIER LETTER TURNED COMMA
'\u02bc': b'\xb1', # MODIFIER LETTER APOSTROPHE
'\u0300': b'\xc1', # COMBINING GRAVE ACCENT
'\u0301': b'\xc2', # COMBINING ACUTE ACCENT
'\u0302': b'\xc3', # COMBINING CIRCUMFLEX ACCENT
'\u0303': b'\xc4', # COMBINING TILDE
'\u0304': b'\xc5', # COMBINING MACRON
'\u0306': b'\xc6', # COMBINING BREVE
'\u0307': b'\xc7', # COMBINING DOT ABOVE
'\u0308': b'\xc8', # COMBINING DIAERESIS
'\u0309': b'\xc0', # COMBINING HOOK ABOVE
'\u030a': b'\xca', # COMBINING RING ABOVE
'\u030b': b'\xcd', # COMBINING DOUBLE ACUTE ACCENT
'\u030c': b'\xcf', # COMBINING CARON
'\u0312': b'\xcc', # COMBINING TURNED COMMA ABOVE
'\u0315': b'\xcb', # COMBINING COMMA ABOVE RIGHT
'\u031b': b'\xce', # COMBINING HORN
'\u031c': b'\xd1', # COMBINING LEFT HALF RING BELOW
'\u0323': b'\xd6', # COMBINING DOT BELOW
'\u0324': b'\xd7', # COMBINING DIAERESIS BELOW
'\u0325': b'\xd4', # COMBINING RING BELOW
'\u0326': b'\xd2', # COMBINING COMMA BELOW
'\u0327': b'\xd0', # COMBINING CEDILLA
'\u0328': b'\xd3', # COMBINING OGONEK
'\u0329': b'\xda', # COMBINING VERTICAL LINE BELOW
'\u032d': b'\xdb', # COMBINING CIRCUMFLEX ACCENT BELOW
'\u032e': b'\xd5', # COMBINING BREVE BELOW
'\u0332': b'\xd8', # COMBINING LOW LINE
'\u0333': b'\xd9', # COMBINING DOUBLE LOW LINE
'\u0340': b'\xc1', # COMBINING GRAVE TONE MARK
'\u0341': b'\xc2', # COMBINING ACUTE TONE MARK
'\u0344': b'\xc2\xc8', # COMBINING GREEK DIALYTIKA TONOS
'\u0374': b'\xbd', # GREEK NUMERAL SIGN
'\u037e': b';', # GREEK QUESTION MARK
'\u0387': b'\xb7', # GREEK ANO TELEIA
'\u1e00': b'\xd4A', # LATIN CAPITAL LETTER A WITH RING BELOW
'\u1e01': b'\xd4a', # LATIN SMALL LETTER | |
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype == float
def test_r_uint(self):
def f(n):
return n + constant_unsigned_five
a = self.RPythonAnnotator()
s = a.build_types(f, [r_uint])
assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
def test_large_unsigned(self):
large_constant = sys.maxint * 2 + 1 # 0xFFFFFFFF on 32-bit platforms
def f():
return large_constant
a = self.RPythonAnnotator()
with py.test.raises(ValueError):
a.build_types(f, [])
# if you want to get a r_uint, you have to be explicit about it
def test_add_different_ints(self):
def f(a, b):
return a + b
a = self.RPythonAnnotator()
with py.test.raises(UnionError):
a.build_types(f, [r_uint, int])
def test_merge_different_ints(self):
def f(a, b):
if a:
c = a
else:
c = b
return c
a = self.RPythonAnnotator()
with py.test.raises(UnionError):
a.build_types(f, [r_uint, int])
def test_merge_ruint_zero(self):
def f(a):
if a:
c = a
else:
c = 0
return c
a = self.RPythonAnnotator()
s = a.build_types(f, [r_uint])
assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
def test_merge_ruint_nonneg_signed(self):
def f(a, b):
if a:
c = a
else:
assert b >= 0
c = b
return c
a = self.RPythonAnnotator()
s = a.build_types(f, [r_uint, int])
assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
def test_prebuilt_long_that_is_not_too_long(self):
small_constant = 12L
def f():
return small_constant
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 12
assert s.nonneg
assert not s.unsigned
#
small_constant = -23L
def f():
return small_constant
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == -23
assert not s.nonneg
assert not s.unsigned
def test_pbc_getattr(self):
class C:
def __init__(self, v1, v2):
self.v2 = v2
self.v1 = v1
def _freeze_(self):
return True
c1 = C(1,'a')
c2 = C(2,'b')
c3 = C(3,'c')
def f1(l, c):
l.append(c.v1)
def f2(l, c):
l.append(c.v2)
def g():
l1 = []
l2 = []
f1(l1, c1)
f1(l1, c2)
f2(l2, c2)
f2(l2, c3)
return l1,l2
a = self.RPythonAnnotator()
s = a.build_types(g,[])
l1, l2 = s.items
assert listitem(l1).knowntype == int
assert listitem(l2).knowntype == str
acc1 = a.bookkeeper.getdesc(c1).getattrfamily()
acc2 = a.bookkeeper.getdesc(c2).getattrfamily()
acc3 = a.bookkeeper.getdesc(c3).getattrfamily()
assert acc1 is acc2 is acc3
assert len(acc1.descs) == 3
assert dict.fromkeys(acc1.attrs) == {'v1': None, 'v2': None}
def test_single_pbc_getattr(self):
class C:
def __init__(self, v1, v2):
self.v1 = v1
self.v2 = v2
def _freeze_(self):
return True
c1 = C(11, "hello")
c2 = C(22, 623)
def f1(l, c):
l.append(c.v1)
def f2(c):
return c.v2
def f3(c):
return c.v2
def g():
l = []
f1(l, c1)
f1(l, c2)
return l, f2(c1), f3(c2)
a = self.RPythonAnnotator()
s = a.build_types(g,[])
s_l, s_c1v2, s_c2v2 = s.items
assert listitem(s_l).knowntype == int
assert s_c1v2.const == "hello"
assert s_c2v2.const == 623
acc1 = a.bookkeeper.getdesc(c1).getattrfamily()
acc2 = a.bookkeeper.getdesc(c2).getattrfamily()
assert acc1 is acc2
assert acc1.attrs.keys() == ['v1']
def test_isinstance_unsigned_1(self):
def f(x):
return isinstance(x, r_uint)
def g():
v = r_uint(1)
return f(v)
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.const == True
def test_isinstance_unsigned_2(self):
class Foo:
pass
def f(x):
return isinstance(x, r_uint)
def g():
v = Foo()
return f(v)
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.const == False
def test_isinstance_base_int(self):
def f(x):
return isinstance(x, base_int)
def g(n):
v = r_uint(n)
return f(v)
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert s.const == True
def test_alloc_like(self):
class Base(object):
pass
class C1(Base):
pass
class C2(Base):
pass
def inst(cls):
return cls()
def alloc(cls):
i = inst(cls)
assert isinstance(i, cls)
return i
alloc._annspecialcase_ = "specialize:arg(0)"
def f():
c1 = alloc(C1)
c2 = alloc(C2)
return c1,c2
a = self.RPythonAnnotator()
s = a.build_types(f, [])
C1df = a.bookkeeper.getuniqueclassdef(C1)
C2df = a.bookkeeper.getuniqueclassdef(C2)
assert s.items[0].classdef == C1df
assert s.items[1].classdef == C2df
allocdesc = a.bookkeeper.getdesc(alloc)
s_C1 = a.bookkeeper.immutablevalue(C1)
s_C2 = a.bookkeeper.immutablevalue(C2)
graph1 = allocdesc.specialize([s_C1], None)
graph2 = allocdesc.specialize([s_C2], None)
assert a.binding(graph1.getreturnvar()).classdef == C1df
assert a.binding(graph2.getreturnvar()).classdef == C2df
assert graph1 in a.translator.graphs
assert graph2 in a.translator.graphs
def test_specialcase_args(self):
class C1(object):
pass
class C2(object):
pass
def alloc(cls, cls2):
i = cls()
assert isinstance(i, cls)
j = cls2()
assert isinstance(j, cls2)
return i
def f():
alloc(C1, C1)
alloc(C1, C2)
alloc(C2, C1)
alloc(C2, C2)
alloc._annspecialcase_ = "specialize:arg(0,1)"
a = self.RPythonAnnotator()
C1df = a.bookkeeper.getuniqueclassdef(C1)
C2df = a.bookkeeper.getuniqueclassdef(C2)
s = a.build_types(f, [])
allocdesc = a.bookkeeper.getdesc(alloc)
s_C1 = a.bookkeeper.immutablevalue(C1)
s_C2 = a.bookkeeper.immutablevalue(C2)
graph1 = allocdesc.specialize([s_C1, s_C2], None)
graph2 = allocdesc.specialize([s_C2, s_C2], None)
assert a.binding(graph1.getreturnvar()).classdef == C1df
assert a.binding(graph2.getreturnvar()).classdef == C2df
assert graph1 in a.translator.graphs
assert graph2 in a.translator.graphs
def test_specialize_arg_bound_method(self):
class GC(object):
def trace(self, callback, *args):
return callback(*args)
trace._annspecialcase_ = "specialize:arg(1)"
def callback1(self, arg1):
self.x = arg1
return "hello"
def callback2(self, arg2, arg3):
self.y = arg2
self.z = arg3
return 6
def f():
gc = GC()
s1 = gc.trace(gc.callback1, "foo")
n2 = gc.trace(gc.callback2, 7, 2)
return (s1, n2, gc.x, gc.y, gc.z)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.items[0].const == "hello"
assert s.items[1].const == 6
assert s.items[2].const == "foo"
assert s.items[3].const == 7
assert s.items[4].const == 2
def test_specialize_and_star_args(self):
class I(object):
def execute(self, op, *args):
if op == 0:
return args[0]+args[1]
if op == 1:
return args[0] * args[1] + args[2]
execute._annspecialcase_ = "specialize:arg(1)"
def f(x, y):
i = I()
a = i.execute(0, x, y)
b = i.execute(1, y, y, 5)
return a+b
a = self.RPythonAnnotator()
s = a.build_types(f, [int, int])
executedesc = a.bookkeeper.getdesc(I.execute.im_func)
assert len(executedesc._cache) == 2
assert len(executedesc._cache[(0, 'star', 2)].startblock.inputargs) == 4
assert len(executedesc._cache[(1, 'star', 3)].startblock.inputargs) == 5
def test_specialize_arg_or_var(self):
def f(a):
return 1
f._annspecialcase_ = 'specialize:arg_or_var(0)'
def fn(a):
return f(3) + f(a)
a = self.RPythonAnnotator()
a.build_types(fn, [int])
executedesc = a.bookkeeper.getdesc(f)
assert sorted(executedesc._cache.keys()) == [None, (3,)]
# we got two different special
def test_specialize_call_location(self):
def g(a):
return a
g._annspecialcase_ = "specialize:call_location"
def f(x):
return g(x)
f._annspecialcase_ = "specialize:argtype(0)"
def h(y):
w = f(y)
return int(f(str(y))) + w
a = self.RPythonAnnotator()
assert a.build_types(h, [int]) == annmodel.SomeInteger()
def test_assert_list_doesnt_lose_info(self):
class T(object):
pass
def g(l):
assert isinstance(l, list)
return l
def f():
l = [T()]
return g(l)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
s_item = listitem(s)
assert isinstance(s_item, annmodel.SomeInstance)
assert s_item.classdef is a.bookkeeper.getuniqueclassdef(T)
def test_int_str_mul(self):
def f(x,a,b):
return a*x+x*b
a = self.RPythonAnnotator()
s = a.build_types(f, [str,int,int])
assert s.knowntype == str
def test_list_tuple(self):
def g0(x):
return list(x)
def g1(x):
return list(x)
def f(n):
l1 = g0(())
l2 = g1((1,))
if n:
t = (1,)
else:
t = (2,)
l3 = g1(t)
return l1, l2, l3
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert listitem(s.items[0]) == annmodel.SomeImpossibleValue()
assert listitem(s.items[1]).knowntype == int
assert listitem(s.items[2]).knowntype == int
def test_empty_list(self):
def f():
l = []
return bool(l)
def g():
l = []
x = bool(l)
l.append(1)
return x, bool(l)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == False
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.items[0].knowntype == bool and not s.items[0].is_constant()
assert s.items[1].knowntype == bool and not s.items[1].is_constant()
def test_empty_dict(self):
def f():
d = {}
return bool(d)
def g():
d = {}
x = bool(d)
d['a'] = 1
return x, bool(d)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == False
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.items[0].knowntype == bool and not s.items[0].is_constant()
assert s.items[1].knowntype == bool and not s.items[1].is_constant()
def test_call_two_funcs_but_one_can_only_raise(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.call_two_funcs_but_one_can_only_raise,
[int])
assert s == a.bookkeeper.immutablevalue(None)
def test_reraiseKeyError(self):
def f(dic):
try:
dic[5]
except KeyError:
raise
a = self.RPythonAnnotator()
a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.const = KeyError
t.is_type_of = [ev]
assert a.binding(et) == t
assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError)
def test_reraiseAnything(self):
def f(dic):
try:
dic[5]
except:
raise
a = self.RPythonAnnotator()
a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.is_type_of = [ev]
t.const = KeyError # IndexError ignored because 'dic' is a dict
assert a.binding(et) == t
assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError)
def test_exception_mixing(self):
def h():
pass
def g():
pass
class X(Exception):
def __init__(self, x=0):
self.x = x
def f(a, l):
if a==1:
raise X
elif a==2:
raise X(1)
elif a==3:
raise X(4)
else:
try:
l[0]
x,y = l
g()
finally:
h()
a = self.RPythonAnnotator()
a.build_types(f, [int, somelist(annmodel.s_Int)])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.is_type_of = [ev]
assert a.binding(et) == t
assert | |
the slugging
percentage. Percentage ranges from 0-1.
"""
return self._home_on_base_plus
@int_property_decorator
def home_pitches(self):
"""
Returns an ``int`` of the number of pitches the home team faced.
"""
return self._home_pitches
@int_property_decorator
def home_strikes(self):
"""
Returns an ``int`` of the number of times a strike was called against
the home team.
"""
return self._home_strikes
@float_property_decorator
def home_win_probability_for_offensive_player(self):
"""
Returns a ``float`` of the overall influence the home team's offense
had on the outcome of the game where 0.0 denotes no influence and 1.0
denotes the offense was solely responsible for the outcome.
"""
return self._home_win_probability_for_offensive_player
@float_property_decorator
def home_average_leverage_index(self):
"""
Returns a ``float`` of the amount of pressure the home team's pitcher
faced during the game. 1.0 denotes average pressure while numbers less
than 0 denote lighter pressure.
"""
return self._home_average_leverage_index
@float_property_decorator
def home_win_probability_added(self):
"""
Returns a ``float`` of the total positive influence the home team's
offense had on the outcome of the game.
"""
return self._home_win_probability_added
@float_property_decorator
def home_win_probability_subtracted(self):
"""
Returns a ``float`` of the total negative influence the home team's
offense had on the outcome of the game.
"""
return self._home_win_probability_subtracted
@float_property_decorator
def home_base_out_runs_added(self):
"""
Returns a ``float`` of the number of base out runs added by the home
team.
"""
return self._home_base_out_runs_added
@int_property_decorator
def home_putouts(self):
"""
Returns an ``int`` of the number of putouts the home team registered.
"""
return self._home_putouts
@int_property_decorator
def home_assists(self):
"""
Returns an ``int`` of the number of assists the home team registered.
"""
return self._home_assists
@float_property_decorator
def home_innings_pitched(self):
"""
Returns a ``float`` of the number of innings the home team pitched.
"""
return self._home_innings_pitched
@int_property_decorator
def home_home_runs(self):
"""
Returns an ``int`` of the number of times the home team gave up a home
run.
"""
return self._home_home_runs
@int_property_decorator
def home_strikes_by_contact(self):
"""
Returns an ``int`` of the number of times the home team struck out a
batter who made contact with the pitch.
"""
return self._home_strikes_by_contact
@int_property_decorator
def home_strikes_swinging(self):
"""
Returns an ``int`` of the number of times the home team struck out a
batter who was swinging.
"""
return self._home_strikes_swinging
@int_property_decorator
def home_strikes_looking(self):
"""
Returns an ``int`` of the number of times the home team struck out a
batter who was looking.
"""
return self._home_strikes_looking
@int_property_decorator
def home_grounded_balls(self):
"""
Returns an ``int`` of the number of grounded balls the home team
allowed.
"""
return self._home_grounded_balls
@int_property_decorator
def home_fly_balls(self):
"""
Returns an ``int`` of the number of fly balls the home team allowed.
"""
return self._home_fly_balls
@int_property_decorator
def home_line_drives(self):
"""
Returns an ``int`` of the number of line drives the home team allowed.
"""
return self._home_line_drives
@int_property_decorator
def home_unknown_bat_type(self):
"""
Returns an ``int`` of the number of home at bats that were not properly
tracked and therefore cannot be safely placed in another statistical
category.
"""
return self._home_unknown_bat_type
@int_property_decorator
def home_game_score(self):
"""
Returns an ``int`` of the starting home pitcher's score, determined by
many factors, such as number of runs scored against, number of strikes,
etc.
"""
return self._home_game_score
@int_property_decorator
def home_inherited_runners(self):
"""
Returns an ``int`` of the number of runners a pitcher inherited when he
entered the game.
"""
return self._home_inherited_runners
@int_property_decorator
def home_inherited_score(self):
"""
Returns an ``int`` of the number of scorers a pitcher inherited when he
entered the game.
"""
return self._home_inherited_score
@float_property_decorator
def home_win_probability_by_pitcher(self):
"""
Returns a ``float`` of the amount of influence the home pitcher had on
the game's result with 0.0 denoting zero influence and 1.0 denoting he
was solely responsible for the team's win.
"""
return self._home_win_probability_by_pitcher
@float_property_decorator
def home_base_out_runs_saved(self):
"""
Returns a ``float`` of the number of runs saved by the home pitcher
based on the number of players on bases. 0.0 denotes an average value.
"""
return self._home_base_out_runs_saved
class Boxscores:
"""
Search for MLB games taking place on a particular day.
Retrieve a dictionary which contains a list of all games being played on a
particular day. Output includes a link to the boxscore, and the names and
abbreviations for both the home and away teams. If no games are played on a
particular day, the list will be empty.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will be
pulled. If left empty, or if 'end_date' is prior to 'date', only the
games from the day specified in the 'date' parameter will be saved.
"""
def __init__(self, date, end_date=None):
self._boxscores = {}
self._find_games(date, end_date)
@property
def games(self):
"""
Returns a ``dictionary`` object representing all of the games played on
the requested day. Dictionary is in the following format::
{
'date': [ # 'date' is the string date in format 'MM-DD-YYYY'
{
'home_name': Name of the home team, such as 'New York
Yankees' (`str`),
'home_abbr': Abbreviation for the home team, such as
'NYY' (`str`),
'away_name': Name of the away team, such as 'Houston
Astros' (`str`),
'away_abbr': Abbreviation for the away team, such as
'HOU' (`str`),
'boxscore': String representing the boxscore URI, such
as 'SLN/SLN201807280' (`str`),
'winning_name': Full name of the winning team, such as
'New York Yankees' (`str`),
'winning_abbr': Abbreviation for the winning team, such
as 'NYY' (`str`),
'losing_name': Full name of the losing team, such as
'<NAME>' (`str`),
'losing_abbr': Abbreviation for the losing team, such
as 'HOU' (`str`),
'home_score': Integer score for the home team (`int`),
'away_score': Integer score for the away team (`int`)
},
{ ... },
...
]
}
If no games were played on 'date', the list for ['date'] will be empty.
"""
return self._boxscores
def _create_url(self, date):
"""
Build the URL based on the passed datetime object.
In order to get the proper boxscore page, the URL needs to include the
requested month, day, and year.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
Returns
-------
string
Returns a ``string`` of the boxscore URL including the requested
date.
"""
return BOXSCORES_URL % (date.year, date.month, date.day)
def _get_requested_page(self, url):
"""
Get the requested page.
Download the requested page given the created URL and return a PyQuery
object.
Parameters
----------
url : string
The URL containing the boxscores to find.
Returns
-------
PyQuery object
A PyQuery object containing the HTML contents of the requested
page.
"""
return pq(url)
def _get_boxscore_uri(self, url):
"""
Find the boxscore URI.
Given the boxscore tag for a game, parse the embedded URI for the
boxscore.
Parameters
----------
url : PyQuery object
A PyQuery object containing the game's boxscore tag which has the
boxscore URI embedded within it.
Returns
-------
string
Returns a ``string`` containing the link to the game's boxscore
page.
"""
uri = re.sub(r'.*/boxes/', '', str(url))
uri = re.sub(r'\.shtml.*', '', uri).strip()
return uri
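# Illustrative example (assumed input, using the URI format shown in the
# ``games`` docstring): a boxscore tag containing
# '/boxes/SLN/SLN201807280.shtml' yields 'SLN/SLN201807280'.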
def _parse_abbreviation(self, abbr):
"""
Parse a team's abbreviation.
Given the team's HTML name tag, parse their abbreviation.
Parameters
----------
abbr : string
A string of a team's HTML name tag.
Returns
-------
string
Returns a ``string`` of the team's abbreviation.
"""
abbr = re.sub(r'.*/teams/', '', str(abbr))
abbr = re.sub(r'/.*', '', abbr)
return abbr
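    # Example of the two substitutions above (hypothetical input tag text):
    #   '.../teams/NYY/2018.shtml' -> 'NYY'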
def _get_name(self, name):
"""
Find a team's name and abbreviation.
Given the team's HTML name tag, determine their name and abbreviation.
Parameters
----------
name : PyQuery object
A PyQuery object of a team's HTML name tag in the boxscore.
Returns
-------
tuple
Returns a tuple containing the name and abbreviation for a team.
Tuple is in the following order: Team Name, Team Abbreviation.
"""
team_name = name.text()
abbr = self._parse_abbreviation(name)
return team_name, abbr
def _get_score(self, score_link):
"""
Find a team's final score.
Given an
returned a token or ``None``
if no token was returned
"""
# hostname and username are not required for GSSAPI, but they are for SSPI
self._gss_host = hostname
self._username = username
if self._gss_srv_ctxt is None:
self._gss_srv_ctxt = gssapi.AcceptContext()
token = self._gss_srv_ctxt.step(recv_token)
self._gss_srv_ctxt_status = self._gss_srv_ctxt.established
return token
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``gssapi.GSSException`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if self._username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
self._gss_srv_ctxt.verify_mic(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
self._gss_ctxt.verify_mic(self._session_id, mic_token)
@property
def credentials_delegated(self):
"""
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
"""
if self._gss_srv_ctxt.delegated_cred is not None:
return True
return False
def save_client_creds(self, client_token):
"""
Save the Client token in a file. This is used by the SSH server
to store the client credentials if credentials are delegated
(server mode).
:param str client_token: The GSS-API token received from the client
:raises:
``NotImplementedError`` -- Credential delegation is currently not
supported in server mode
"""
raise NotImplementedError
if __version_info__ < (2, 5):
# provide the old name for strict backward compatibility
_SSH_GSSAPI = _SSH_GSSAPI_OLD
class _SSH_GSSAPI_NEW(_SSH_GSSAuth):
"""
Implementation of the GSS-API MIT Kerberos Authentication for SSH2,
using the newer, currently maintained gssapi package.
:see: `.GSSAuth`
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gss-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
if self._gss_deleg_creds:
self._gss_flags = (
gssapi.RequirementFlag.protection_ready,
gssapi.RequirementFlag.integrity,
gssapi.RequirementFlag.mutual_authentication,
gssapi.RequirementFlag.delegate_to_peer,
)
else:
self._gss_flags = (
gssapi.RequirementFlag.protection_ready,
gssapi.RequirementFlag.integrity,
gssapi.RequirementFlag.mutual_authentication,
)
def ssh_init_sec_context(
self, target, desired_mech=None, username=None, recv_token=None
):
"""
Initialize a GSS-API context.
:param str username: The name of the user who attempts to login
:param str target: The hostname of the target to connect to
:param str desired_mech: The negotiated GSS-API mechanism
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param str recv_token: The GSS-API token received from the Server
:raises: `.SSHException` -- Is raised if the desired mechanism of the
client is not supported
:raises: ``gssapi.exceptions.GSSError`` if there is an error signaled
by the GSS-API implementation
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
"""
from pyasn1.codec.der import decoder
self._username = username
self._gss_host = target
targ_name = gssapi.Name(
"host@" + self._gss_host,
name_type=gssapi.NameType.hostbased_service,
)
if desired_mech is not None:
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
raise SSHException("Unsupported mechanism OID.")
krb5_mech = gssapi.MechType.kerberos
token = None
if recv_token is None:
self._gss_ctxt = gssapi.SecurityContext(
name=targ_name,
flags=self._gss_flags,
mech=krb5_mech,
usage="initiate",
)
token = self._gss_ctxt.step(token)
else:
token = self._gss_ctxt.step(recv_token)
self._gss_ctxt_status = self._gss_ctxt.complete
return token
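        # Illustrative client-side sketch (hypothetical names; transport
        # plumbing omitted): the caller keeps feeding server tokens back in
        # until the context reports completion.
        #
        #   auth = _SSH_GSSAPI_NEW("gssapi-with-mic", gss_deleg_creds=False)
        #   out_token = auth.ssh_init_sec_context("server.example.com", username="alice")
        #   while not auth._gss_ctxt_status:
        #       in_token = exchange_with_server(out_token)   # hypothetical helper
        #       out_token = auth.ssh_init_sec_context(
        #           "server.example.com", username="alice", recv_token=in_token)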
def ssh_get_mic(self, session_id, gss_kex=False):
"""
Create the MIC token for a SSH2 message.
:param str session_id: The SSH session ID
:param bool gss_kex: Generate the MIC for GSS-API Key Exchange or not
:return: gssapi-with-mic:
Returns the MIC token from GSS-API for the message we created
with ``_ssh_build_mic``.
gssapi-keyex:
Returns the MIC token from GSS-API with the SSH session ID as
message.
:rtype: str
"""
self._session_id = session_id
if not gss_kex:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
mic_token = self._gss_ctxt.get_signature(mic_field)
else:
# for key exchange with gssapi-keyex
mic_token = self._gss_srv_ctxt.get_signature(self._session_id)
return mic_token
def ssh_accept_sec_context(self, hostname, recv_token, username=None):
"""
Accept a GSS-API context (server mode).
:param str hostname: The server's hostname
:param str username: The name of the user who attempts to login
:param str recv_token: The GSS-API Token received from the server,
if it's not the initial call.
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
"""
# hostname and username are not required for GSSAPI, but they are for SSPI
self._gss_host = hostname
self._username = username
if self._gss_srv_ctxt is None:
self._gss_srv_ctxt = gssapi.SecurityContext(usage="accept")
token = self._gss_srv_ctxt.step(recv_token)
self._gss_srv_ctxt_status = self._gss_srv_ctxt.complete
return token
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``gssapi.exceptions.GSSError`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if self._username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
self._gss_srv_ctxt.verify_signature(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
self._gss_ctxt.verify_signature(self._session_id, mic_token)
@property
def credentials_delegated(self):
"""
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
:rtype: bool
"""
if self._gss_srv_ctxt.delegated_creds is not None:
return True
return False
def save_client_creds(self, client_token):
"""
Save the Client token in a file. This is used by the SSH server
to store the client credentials if credentials are delegated
(server mode).
:param str client_token: The GSS-API token received from the client
:raises: ``NotImplementedError`` -- Credential delegation is currently
not supported in server mode
"""
raise NotImplementedError
class _SSH_SSPI(_SSH_GSSAuth):
"""
Implementation of the Microsoft SSPI Kerberos Authentication for SSH2.
:see: `.GSSAuth`
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gss-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
if self._gss_deleg_creds:
self._gss_flags = (
sspicon.ISC_REQ_INTEGRITY
| sspicon.ISC_REQ_MUTUAL_AUTH
| sspicon.ISC_REQ_DELEGATE
)
else:
self._gss_flags = (
sspicon.ISC_REQ_INTEGRITY | sspicon.ISC_REQ_MUTUAL_AUTH
)
def ssh_init_sec_context(
self, target, desired_mech=None, username=None, recv_token=None
):
"""
Initialize a SSPI context.
:param str username: The name of the user who attempts to login
:param str target: The FQDN of the target to connect to
:param str desired_mech: The negotiated SSPI mechanism
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param recv_token: The SSPI token received from the Server
:raises:
`.SSHException` -- Is raised if the desired mechanism of the client
is not supported
:return: A ``String`` if the SSPI has returned a token or ``None`` if
no token was returned
"""
from pyasn1.codec.der import decoder
self._username = username
self._gss_host = target
error = 0
targ_name = "host/" + self._gss_host
if desired_mech is not None:
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
raise SSHException("Unsupported mechanism OID.")
try:
if recv_token is None:
self._gss_ctxt = sspi.ClientAuth(
"Kerberos", scflags=self._gss_flags, targetspn=targ_name
)
error, token = self._gss_ctxt.authorize(recv_token)
token = token[0].Buffer
except pywintypes.error as e:
e.strerror += ", Target: {}".format(self._gss_host)
raise
if error == 0:
"""
if the status is GSS_COMPLETE (error = 0) the context is fully
established and we can set _gss_ctxt_status to True.
"""
self._gss_ctxt_status = True
token = None
"""
You won't get another token if the context is fully established,
so we set token to None instead of ""
"""
return token
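        # Illustrative sketch of the underlying pywin32 flow used above
        # (hypothetical target SPN):
        #
        #   ca = sspi.ClientAuth("Kerberos", scflags=flags, targetspn="host/server.example.com")
        #   err, out_buf = ca.authorize(None)    # first leg: no input token
        #   token = out_buf[0].Buffer            # raw bytes to send to the server
        #
        # The exchange repeats with the server's reply until authorize()
        # returns 0, which is the error == 0 condition checked above.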
def ssh_get_mic(self, session_id, gss_kex=False):
"""
Create the MIC token for a SSH2 message.
:param str session_id: The SSH session ID
:param bool gss_kex: Generate the MIC for Key Exchange with SSPI or not
:return: gssapi-with-mic:
Returns the MIC token from SSPI for the message we created
with ``_ssh_build_mic``.
gssapi-keyex:
Returns the MIC token from SSPI with the SSH session ID as
message.
"""
self._session_id = session_id
if not gss_kex:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
mic_token = self._gss_ctxt.sign(mic_field)
else:
# for key exchange with gssapi-keyex
mic_token = self._gss_srv_ctxt.sign(self._session_id)
return mic_token
def ssh_accept_sec_context(self, hostname, username, recv_token):
"""
Accept a SSPI context (server mode).
:param str hostname: The server's FQDN
:param str username: The name of the user who attempts to login
:param str recv_token: The SSPI Token received from the server,
if it's not the initial call.
:return: A ``String`` if the SSPI has returned a token or ``None`` if no token was returned
args
reach_point_args = inspect.getfullargspec(ReachPoint).args
# create a list of input arguments from the columns in the row
input_args = []
for arg in reach_point_args[1:]:
if arg in row_dict.keys():
input_args.append(row_dict[arg])
else:
input_args.append(None)
# use the input args to create a new reach point
reach_point = ReachPoint(*input_args)
# add the reach point to the reach points list
reach._reach_points.append(reach_point)
# try to get the line geometry, and use this for the reach geometry
fs_line = reach_line_layer.query_by_reach_id(reach_id)
if len(fs_line.features) > 0:
for this_feature in fs_line.features:
if this_feature.geometry is not None:
reach._geometry = Geometry(this_feature.geometry)
break
# return the reach object
return reach
def _get_accesses_by_type(self, access_type):
# check to ensure the correct access type is being specified
if access_type != 'putin' and access_type != 'takeout' and access_type != 'intermediate':
raise Exception('access type must be either "putin", "takeout" or "intermediate"')
# return list of all accesses of specified type
return [pt for pt in self._reach_points if pt.subtype == access_type and pt.point_type == 'access']
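        # Illustrative usage sketch ('reach' is a hypothetical instance): the
        # putin, takeout and intermediate_accesses properties defined below
        # are thin wrappers around this helper, e.g.
        #
        #   reach.putin     # first ReachPoint with subtype 'putin', or None
        #   reach.takeout   # first ReachPoint with subtype 'takeout', or None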
def _set_putin_takeout(self, access, access_type):
"""
Set the putin or takeout using a ReachPoint object.
:param access: ReachPoint - Required
ReachPoint geometry delineating the location of the geometry to be modified.
:param access_type: String - Required
Either "putin" or "takeout".
:return:
"""
# enforce correct object type
if type(access) != ReachPoint:
raise Exception('{} access must be an instance of ReachPoint object type'.format(access_type))
# check to ensure the correct access type is being specified
if access_type != 'putin' and access_type != 'takeout':
raise Exception('access type must be either "putin" or "takeout"')
# update the list to NOT include the point we are adding
self._reach_points = [pt for pt in self._reach_points if pt.subtype != access_type]
# ensure the new point being added is the right type
access.point_type = 'access'
access.subtype = access_type
# add it to the reach point list
self._reach_points.append(access)
@property
def putin(self):
access_df = self._get_accesses_by_type('putin')
if len(access_df) > 0:
return access_df[0]
else:
return None
def set_putin(self, access):
self._set_putin_takeout(access, 'putin')
@property
def takeout(self):
access_lst = self._get_accesses_by_type('takeout')
if len(access_lst) > 0:
return access_lst[0]
else:
return None
def set_takeout(self, access):
self._set_putin_takeout(access, 'takeout')
@property
def intermediate_accesses(self):
access_df = self._get_accesses_by_type('intermediate')
if len(access_df) > 0:
return access_df
else:
return None
def add_intermediate_access(self, access):
access.set_type('intermediate')
self.access_list.append(access)
def snap_putin_and_takeout_and_trace(self, webmap=False, gis=None):
"""
Update the putin and takeout coordinates, and trace the hydroline
using the EPA's WATERS services.
:param webmap: Boolean - Optional
Return a web map widget if successful - useful for visualizing single reach.
:param gis: Active GIS for performing hydrology analysis.
:return:
"""
# ensure a putin and takeout actually were found
if self.putin is None or self.takeout is None:
self.error = True
self.notes = 'Reach does not appear to have both a put-in and take-out location defined.'
trace_status = False
# if there is something to work with, keep going
else:
# get the snapped and corrected reach locations for the put-in
self.putin.snap_to_nhdplus()
# if a put-in was not located using the WATERS service, flag
if self.putin.nhdplus_measure is None or self.putin.nhdplus_reach_id is None:
nhd_status = False
# if the put-in was located using WATERS, flag as successful
else:
nhd_status = True
# initialize trace_status to False first
trace_status = False
if nhd_status:
# try to trace a few times using WATERS, but if it doesn't work, fall back to Esri Hydrology
attempts = 0
max_attempts = 5
while attempts < max_attempts:
try:
# use the EPA navigate service to trace downstream
waters = WATERS()
trace_polyline = waters.get_downstream_navigation_polyline(self.putin.nhdplus_reach_id,
self.putin.nhdplus_measure)
# project the takeout geometry to the same spatial reference as the trace polyline
takeout_geom = self.takeout.geometry.match_spatial_reference(trace_polyline)
# snap the takeout geometry to the hydroline
takeout_geom = takeout_geom.snap_to_line(trace_polyline)
# update the takeout to the snapped point
self.takeout.set_geometry(takeout_geom)
# now dial in the coordinates using the EPA service - getting the rest of the attributes
self.takeout.snap_to_nhdplus()
# ensure a takeout was actually found
if self.takeout.nhdplus_measure is None or self.takeout.nhdplus_reach_id is None:
self.error = True
self.notes = 'Takeout could not be located using EPA\'s WATERS service'
trace_status = False
else:
trace_status = True
self.tracing_method = 'EPA WATERS NHD Plus v2'
break
except:
# increment the attempt counter
attempts += 1
# if the put-in has not yet been located using the WATERS service
if not trace_status:
# do a little voodoo to get a feature set containing just the put-in
pts_df = self.reach_points_as_dataframe
putin_fs = pts_df[
(pts_df['point_type'] == 'access') & (pts_df['subtype'] == 'putin')
].spatial.to_featureset()
# use the feature set to get a response from the watershed function using Esri's Hydrology service
wtrshd_resp = hydrology.watershed(
input_points=putin_fs,
point_id_field='reach_id',
snap_distance=100,
snap_distance_units='Meters',
gis=gis
)
# update the putin if a point was found using the watershed function
if len(wtrshd_resp._fields) and len(wtrshd_resp.snapped_points.features):
putin = self.putin
putin_geometry = wtrshd_resp.snapped_points.features[0].geometry
putin_geometry['spatialReference'] = wtrshd_resp.snapped_points.spatial_reference
putin.set_geometry(Geometry(putin_geometry))
self.set_putin(putin)
# if a putin was not found, quit swimming in the ocean
else:
self.error = True
self.notes = 'Put-in could not be located with either the WATERS or Esri Hydrology services.'
# trace using Esri Hydrology services
attempts = 10
fail_count = 0
# set variable for tracking the trace response
trace_resp = None
# try to get a trace response
while fail_count < attempts:
try:
trace_resp = hydrology.trace_downstream(putin_fs, point_id_field='reach_id', gis=gis)
break
except:
fail_count = fail_count + 1
# if the trace was successful
if trace_resp and not self.error:
# extract out the trace geometry
trace_geom = trace_resp.features[0].geometry
trace_geom['spatialReference'] = trace_resp.spatial_reference
trace_geom = Geometry(trace_geom)
# save the resolution for the smoothing later
trace_data_resolution = float(trace_resp.features[0].attributes['DataResolution'])
# snap the takeout to the traced line
takeout_geom = self.takeout.geometry.snap_to_line(trace_geom)
self.takeout.set_geometry(takeout_geom)
# trim the reach line to the takeout
line_geom = trace_geom.trim_at_point(self.takeout.geometry)
# ensure there are more than two vertices for smoothing
if line_geom.coordinates().size > 6:
# smooth the geometry since the hydrology tracing can appear a little jagged
self._geometry = _smooth_geometry(line_geom,
densify_max_segment_length=trace_data_resolution * 2,
gis=gis)
else:
self._geometry = line_geom
trace_status = True
self.tracing_method = "ArcGIS Online Hydrology Services"
# if neither of those worked, flag the error
if not trace_status:
self.error = True
self.notes = "The reach could not be trace with neither the EPA's WATERS service nor the Esri " \
"Hydrology services."
# if map result desired, return it
if webmap:
return self.plot_map()
else:
return trace_status
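    # Illustrative usage sketch (hypothetical reach instance and GIS session):
    #
    #   from arcgis.gis import GIS
    #   gis = GIS(profile='my_profile')   # hypothetical saved login profile
    #   succeeded = reach.snap_putin_and_takeout_and_trace(gis=gis)
    #   if not succeeded:
    #       print(reach.notes)            # reason the trace failed
    #
    # Passing webmap=True returns a map widget instead of the boolean status.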
@property
def geometry(self):
"""
Return the reach polyline geometry.
:return: Polyline Geometry
"""
return self._geometry
def _get_feature_attributes(self):
"""helper function for exporting features"""
srs = pd.Series(dir(self))
srs = srs[
(~srs.str.startswith('_'))
& (~srs.str.contains('as_'))
& (srs != 'putin')
& (srs != 'takeout')
& (srs != 'intermediate_accesses')
& (srs != 'geometry')
& (srs != 'has_a_point')
]
srs = srs[srs.apply(lambda p: not hasattr(getattr(self, p), '__call__'))]
return {key: getattr(self, key) for key in srs}
@property
def as_feature(self):
"""
Get the reach as an ArcGIS Python API Feature object.
:return: ArcGIS Python API Feature object representing the reach.
"""
if self.geometry:
feat = Feature(geometry=self.geometry, attributes=self._get_feature_attributes())
else:
feat = Feature(attributes=self._get_feature_attributes())
return feat
@property
def as_centroid_feature(self):
"""
Get a feature with the centroid geometry.
:return: Feature with point geometry for the reach centroid.
"""
return Feature(geometry=self.centroid, attributes=self._get_feature_attributes())
def publish(self, reach_line_layer, reach_centroid_layer, reach_point_layer):
"""
Publish the reach to three feature layers; the reach line layer, the reach centroid layer,
and the reach points layer.
:param reach_line_layer: ReachLayer with line geometry to publish to.
:param reach_centroid_layer: ReachLayer with point geometry for the centroid to publish to.
:param reach_point_layer: ReachPointLayer
:return: Boolean True if successful and False if not
"""
if not self.putin and not self.takeout:
return False
# add the reach line if it was successfully traced
if not self.error:
resp_line = reach_line_layer.add_reach(self)
add_line = len(resp_line['addResults'])
# regardless, add the centroid and points
resp_centroid = reach_centroid_layer.add_reach(self)
add_centroid = len(resp_centroid['addResults'])
resp_point = reach_point_layer.add_reach(self)
add_point = len(resp_point['addResults'])
# check results for adds and return correct response
if not self.error and add_line and add_centroid and add_point:
return True
elif add_centroid and add_point:
return True
else:
return False
def publish_updates(self, reach_line_layer, reach_centroid_layer, reach_point_layer):
"""
Based on the current status of the
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder base exception handling.
Includes decorator for re-raising Cinder-type exceptions.
SHOULD include dedicated exception logging.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import exception as obj_exc
import six
import webob.exc
from cinder.i18n import _, _LE
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class Error(Exception):
pass
class CinderException(Exception):
"""Base Cinder Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
self.kwargs['message'] = message
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
for k, v in self.kwargs.items():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
if self._should_format():
try:
message = self.message % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
# at least get the core message out if something happened
message = self.message
elif isinstance(message, Exception):
message = six.text_type(message)
# NOTE(luisg): We put the actual message in 'msg' so that we can access
# it, because if we try to access the message via 'message' it will be
# overshadowed by the class' message attribute
self.msg = message
super(CinderException, self).__init__(message)
def _should_format(self):
return self.kwargs['message'] is None or '%(message)' in self.message
def __unicode__(self):
return six.text_type(self.msg)
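# Illustrative sketch of the subclassing pattern described in the docstring
# above (the example exception and kwargs are hypothetical, not part of Cinder):
#
#   class WidgetInvalid(CinderException):
#       message = _("Widget %(widget_id)s is invalid: %(reason)s")
#
#   raise WidgetInvalid(widget_id='abc123', reason='missing name')
#   # -> "Widget abc123 is invalid: missing name"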
class VolumeBackendAPIException(CinderException):
message = _("Bad or unexpected response from the storage volume "
"backend API: %(data)s")
class VolumeDriverException(CinderException):
message = _("Volume driver reported an error: %(message)s")
class BackupDriverException(CinderException):
message = _("Backup driver reported an error: %(message)s")
class GlanceConnectionFailed(CinderException):
message = _("Connection to glance failed: %(reason)s")
class NotAuthorized(CinderException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotAuthorized(CinderException):
message = _("Not authorized for image %(image_id)s.")
class DriverNotInitialized(CinderException):
message = _("Volume driver not ready.")
class Invalid(CinderException):
message = _("Unacceptable parameters.")
code = 400
class InvalidSnapshot(Invalid):
message = _("Invalid snapshot: %(reason)s")
class InvalidVolumeAttachMode(Invalid):
message = _("Invalid attaching mode '%(mode)s' for "
"volume %(volume_id)s.")
class VolumeAttached(Invalid):
message = _("Volume %(volume_id)s is still attached, detach volume first.")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s")
class InvalidVolumeType(Invalid):
message = _("Invalid volume type: %(reason)s")
class InvalidVolume(Invalid):
message = _("Invalid volume: %(reason)s")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAuthKey(Invalid):
message = _("Invalid auth key: %(reason)s")
class InvalidConfigurationValue(Invalid):
message = _('Value "%(value)s" is not valid for '
'configuration option "%(option)s"')
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class DeviceUnavailable(Invalid):
message = _("The device in the path %(path)s is unavailable: %(reason)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class APIException(CinderException):
message = _("Error while requesting %(service)s API.")
def __init__(self, message=None, **kwargs):
if 'service' not in kwargs:
kwargs['service'] = 'unknown'
super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
message = _("Timeout while requesting %(service)s API.")
class NotFound(CinderException):
message = _("Resource could not be found.")
code = 404
safe = True
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeAttachmentNotFound(NotFound):
message = _("Volume attachment could not be found with "
"filter: %(filter)s .")
class VolumeMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeAdminMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no administration metadata with "
"key %(metadata_key)s.")
class InvalidVolumeMetadata(Invalid):
message = _("Invalid metadata: %(reason)s")
class InvalidVolumeMetadataSize(Invalid):
message = _("Invalid metadata size: %(reason)s")
class SnapshotMetadataNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeTypeNotFound(NotFound):
message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
message = _("Volume type with name %(volume_type_name)s "
"could not be found.")
class VolumeTypeAccessNotFound(NotFound):
message = _("Volume type access not found for %(volume_type_id)s / "
"%(project_id)s combination.")
class VolumeTypeExtraSpecsNotFound(NotFound):
message = _("Volume Type %(volume_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class VolumeTypeInUse(CinderException):
message = _("Volume Type %(volume_type_id)s deletion is not allowed with "
"volumes present with the type.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ServerNotFound(NotFound):
message = _("Instance %(uuid)s could not be found.")
class VolumeIsBusy(CinderException):
message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(CinderException):
message = _("deleting snapshot %(snapshot_name)s that has "
"dependent volumes")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(CinderException):
message = _("Quota exceeded for resources: %(overs)s")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class Duplicate(CinderException):
pass
class VolumeTypeExists(Duplicate):
message = _("Volume Type %(id)s already exists.")
class VolumeTypeAccessExists(Duplicate):
message = _("Volume type access for %(volume_type_id)s / "
"%(project_id)s combination already exists.")
class VolumeTypeEncryptionExists(Invalid):
message = _("Volume type encryption for type %(type_id)s already exists.")
class VolumeTypeEncryptionNotFound(NotFound):
message = _("Volume type encryption for type %(type_id)s does not exist.")
class MalformedRequestBody(CinderException):
message = _("Malformed message body: %(reason)s")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s")
class ParameterNotFound(NotFound):
message = _("Could not find parameter %(param)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
class NoValidHost(CinderException):
message = _("No valid host was found. %(reason)s")
class NoMoreTargets(CinderException):
"""No more available targets."""
pass
class QuotaError(CinderException):
message = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
class VolumeSizeExceedsAvailableQuota(QuotaError):
message = _("Requested volume or snapshot exceeds allowed %(name)s "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
def __init__(self, message=None, **kwargs):
kwargs.setdefault('name', 'gigabytes')
super(VolumeSizeExceedsAvailableQuota, self).__init__(
message, **kwargs)
class VolumeSizeExceedsLimit(QuotaError):
message = _("Requested volume size %(size)d is larger than "
"maximum allowed limit %(limit)d.")
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
message = _("Requested backup exceeds allowed Backup gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
class VolumeLimitExceeded(QuotaError):
message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for "
"quota '%(name)s'.")
def __init__(self, message=None, **kwargs):
kwargs.setdefault('name', 'volumes')
super(VolumeLimitExceeded, self).__init__(message, **kwargs)
class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class BackupLimitExceeded(QuotaError):
message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")
class VolumeTypeCreateFailed(CinderException):
message = _("Cannot create volume_type with "
"name %(name)s and specs %(extra_specs)s")
class VolumeTypeUpdateFailed(CinderException):
message = _("Cannot update volume_type %(id)s")
class UnknownCmd(VolumeDriverException):
message = _("Unknown or unsupported command %(cmd)s")
class MalformedResponse(VolumeDriverException):
message = _("Malformed response to command %(cmd)s: %(reason)s")
class FailedCmdWithDump(VolumeDriverException):
message = _("Operation failed | |
mem[155] = 32'h5780;
mem[156] = 32'h5800;
mem[157] = 32'h5880;
mem[158] = 32'h5900;
mem[159] = 32'h5980;
mem[160] = 32'h5a00;
mem[161] = 32'h5a80;
mem[162] = 32'h5b00;
mem[163] = 32'h5b80;
mem[164] = 32'h5c00;
mem[165] = 32'h5c80;
mem[166] = 32'h5d00;
mem[167] = 32'h5d80;
mem[168] = 32'h5e00;
mem[169] = 32'h5e80;
mem[170] = 32'h5f00;
mem[171] = 32'h5f80;
mem[172] = 32'h6000;
mem[173] = 32'h6080;
mem[174] = 32'h6100;
mem[175] = 32'h6180;
mem[176] = 32'h6200;
mem[177] = 32'h6280;
mem[178] = 32'h6300;
mem[179] = 32'h6380;
mem[180] = 32'h6400;
mem[181] = 32'h6480;
mem[182] = 32'h6500;
mem[183] = 32'h6580;
mem[184] = 32'h6600;
mem[185] = 32'h6680;
mem[186] = 32'h6700;
mem[187] = 32'h6780;
mem[188] = 32'h6800;
mem[189] = 32'h6880;
mem[190] = 32'h6900;
mem[191] = 32'h6980;
mem[192] = 32'h6a00;
mem[193] = 32'h6a80;
mem[194] = 32'h6b00;
mem[195] = 32'h6b80;
mem[196] = 32'h6c00;
mem[197] = 32'h6c80;
mem[198] = 32'h6d00;
mem[199] = 32'h6d80;
mem[200] = 32'h6e00;
mem[201] = 32'h6e80;
mem[202] = 32'h6f00;
mem[203] = 32'h6f80;
mem[204] = 32'h7000;
mem[205] = 32'h7080;
mem[206] = 32'h7100;
mem[207] = 32'h7180;
mem[208] = 32'h7200;
mem[209] = 32'h7280;
mem[210] = 32'h7300;
mem[211] = 32'h7380;
mem[212] = 32'h7400;
mem[213] = 32'h7480;
mem[214] = 32'h7500;
mem[215] = 32'h7580;
mem[216] = 32'h7600;
mem[217] = 32'h7680;
mem[218] = 32'h7700;
mem[219] = 32'h7780;
mem[220] = 32'h7800;
mem[221] = 32'h7880;
mem[222] = 32'h7900;
mem[223] = 32'h7980;
mem[224] = 32'h7a00;
mem[225] = 32'h7a80;
mem[226] = 32'h7b00;
mem[227] = 32'h7b80;
mem[228] = 32'h7c00;
mem[229] = 32'h7c80;
mem[230] = 32'h7d00;
mem[231] = 32'h7d80;
mem[232] = 32'h7e00;
mem[233] = 32'h7e80;
mem[234] = 32'h7f00;
mem[235] = 32'h7f80;
mem[236] = 32'h8000;
mem[237] = 32'h8080;
mem[238] = 32'h8100;
mem[239] = 32'h8180;
mem[240] = 32'h8200;
mem[241] = 32'h8280;
mem[242] = 32'h8300;
mem[243] = 32'h8380;
mem[244] = 32'h8400;
mem[245] = 32'h8480;
mem[246] = 32'h8500;
mem[247] = 32'h8580;
mem[248] = 32'h8600;
mem[249] = 32'h8680;
mem[250] = 32'h8700;
mem[251] = 32'h8780;
mem[252] = 32'h8800;
mem[253] = 32'h8880;
mem[254] = 32'h8900;
mem[255] = 32'h8980;
mem[256] = 32'h8a00;
mem[257] = 32'h8a80;
mem[258] = 32'h8b00;
mem[259] = 32'h8b80;
mem[260] = 32'h8c00;
mem[261] = 32'h8c80;
mem[262] = 32'h8d00;
mem[263] = 32'h8d80;
mem[264] = 32'h8e00;
mem[265] = 32'h8e80;
mem[266] = 32'h8f00;
mem[267] = 32'h8f80;
mem[268] = 32'h9000;
mem[269] = 32'h9080;
mem[270] = 32'h9100;
mem[271] = 32'h9180;
mem[272] = 32'h9200;
mem[273] = 32'h9280;
mem[274] = 32'h9300;
mem[275] = 32'h9380;
mem[276] = 32'h9400;
mem[277] = 32'h9480;
mem[278] = 32'h9500;
mem[279] = 32'h9580;
mem[280] = 32'h9600;
mem[281] = 32'h9680;
mem[282] = 32'h9700;
mem[283] = 32'h9780;
mem[284] = 32'h9800;
mem[285] = 32'h9880;
mem[286] = 32'h9900;
mem[287] = 32'h9980;
mem[288] = 32'h9a00;
mem[289] = 32'h9a80;
mem[290] = 32'h9b00;
mem[291] = 32'h9b80;
mem[292] = 32'h9c00;
mem[293] = 32'h9c80;
mem[294] = 32'h9d00;
mem[295] = 32'h9d80;
mem[296] = 32'h9e00;
mem[297] = 32'h9e80;
mem[298] = 32'h9f00;
mem[299] = 32'h9f80;
mem[300] = 32'ha000;
mem[301] = 32'ha080;
mem[302] = 32'ha100;
mem[303] = 32'ha180;
mem[304] = 32'ha200;
mem[305] = 32'ha280;
mem[306] = 32'ha300;
mem[307] = 32'ha380;
mem[308] = 32'ha400;
mem[309] = 32'ha480;
mem[310] = 32'ha500;
mem[311] = 32'ha580;
mem[312] = 32'ha600;
mem[313] = 32'ha680;
mem[314] = 32'ha700;
mem[315] = 32'ha780;
mem[316] = 32'ha800;
mem[317] = 32'ha880;
mem[318] = 32'ha900;
mem[319] = 32'ha980;
mem[320] = 32'haa00;
mem[321] = 32'haa80;
mem[322] = 32'hab00;
mem[323] = 32'hab80;
mem[324] = 32'hac00;
mem[325] = 32'hac80;
mem[326] = 32'had00;
mem[327] = 32'had80;
mem[328] = 32'hae00;
mem[329] = 32'hae80;
mem[330] = 32'haf00;
mem[331] = 32'haf80;
mem[332] = 32'hb000;
mem[333] = 32'hb080;
mem[334] = 32'hb100;
mem[335] = 32'hb180;
mem[336] = 32'hb200;
mem[337] = 32'hb280;
mem[338] = 32'hb300;
mem[339] = 32'hb380;
mem[340] = 32'hb400;
mem[341] = 32'hb480;
mem[342] = 32'hb500;
mem[343] = 32'hb580;
mem[344] = 32'hb600;
mem[345] = 32'hb680;
mem[346] = 32'hb700;
mem[347] = 32'hb780;
mem[348] = 32'hb800;
mem[349] = 32'hb880;
mem[350] = 32'hb900;
mem[351] = 32'hb980;
mem[352] = 32'hba00;
mem[353] = 32'hba80;
mem[354] = 32'hbb00;
mem[355] = 32'hbb80;
mem[356] = 32'hbc00;
mem[357] = 32'hbc80;
mem[358] = 32'hbd00;
mem[359] = 32'hbd80;
mem[360] = 32'hbe00;
mem[361] = 32'hbe80;
mem[362] = 32'hbf00;
mem[363] = 32'hbf80;
mem[364] = 32'hc000;
mem[365] = 32'hc080;
mem[366] = 32'hc100;
mem[367] = 32'hc180;
mem[368] = 32'hc200;
mem[369] = 32'hc280;
mem[370] = 32'hc300;
mem[371] = 32'hc380;
mem[372] = 32'hc400;
mem[373] = 32'hc480;
mem[374] = 32'hc500;
mem[375] = 32'hc580;
mem[376] = 32'hc600;
mem[377] = 32'hc680;
mem[378] = 32'hc700;
mem[379] = 32'hc780;
mem[380] = 32'hc800;
mem[381] = 32'hc880;
mem[382] = 32'hc900;
mem[383] = 32'hc980;
mem[384] = 32'hca00;
mem[385] = 32'hca80;
mem[386] = 32'hcb00;
mem[387] = 32'hcb80;
mem[388] = 32'hcc00;
mem[389] = 32'hcc80;
mem[390] = 32'hcd00;
mem[391] = 32'hcd80;
mem[392] = 32'hce00;
mem[393] = 32'hce80;
mem[394] = 32'hcf00;
mem[395] = 32'hcf80;
mem[396] = 32'hd000;
mem[397] = 32'hd080;
mem[398] = 32'hd100;
mem[399] = 32'hd180;
mem[400] = 32'hd200;
mem[401] = 32'hd280;
mem[402] = 32'hd300;
mem[403] = 32'hd380;
mem[404] = 32'hd400;
mem[405] = 32'hd480;
mem[406] = 32'hd500;
mem[407] = 32'hd580;
mem[408] = 32'hd600;
mem[409] = 32'hd680;
mem[410] = 32'hd700;
mem[411] = 32'hd780;
mem[412] = 32'hd800;
mem[413] = 32'hd880;
mem[414] = 32'hd900;
mem[415] = 32'hd980;
mem[416] = 32'hda00;
mem[417] = 32'hda80;
mem[418] = 32'hdb00;
mem[419] = 32'hdb80;
mem[420] = 32'hdc00;
mem[421] = 32'hdc80;
mem[422] = 32'hdd00;
mem[423] = 32'hdd80;
mem[424] = 32'hde00;
mem[425] = 32'hde80;
mem[426] = 32'hdf00;
mem[427] = 32'hdf80;
mem[428] = 32'he000;
mem[429] = 32'he080;
mem[430] = 32'he100;
mem[431] = 32'he180;
mem[432] = 32'he200;
mem[433] = 32'he280;
mem[434] = 32'he300;
mem[435] = 32'he380;
mem[436] = 32'he400;
mem[437] = 32'he480;
mem[438] = 32'he500;
mem[439] = 32'he580;
mem[440] = 32'he600;
mem[441] = 32'he680;
mem[442] = 32'he700;
mem[443] = 32'he780;
mem[444] = 32'he800;
mem[445] = 32'he880;
mem[446] = 32'he900;
mem[447] = 32'he980;
mem[448] = 32'hea00;
mem[449] = 32'hea80;
mem[450] = 32'heb00;
mem[451] = 32'heb80;
mem[452] = 32'hec00;
mem[453] = 32'hec80;
mem[454] = 32'hed00;
mem[455] = 32'hed80;
mem[456] = 32'hee00;
mem[457] = 32'hee80;
mem[458] = 32'hef00;
mem[459] = 32'hef80;
mem[460] = 32'hf000;
mem[461] = 32'hf080;
mem[462] = 32'hf100;
mem[463] = 32'hf180;
mem[464] = 32'hf200;
mem[465] = 32'hf280;
mem[466] = 32'hf300;
mem[467] = 32'hf380;
mem[468] = 32'hf400;
mem[469] = 32'hf480;
mem[470] = 32'hf500;
mem[471] = 32'hf580;
mem[472] = 32'hf600;
mem[473] = 32'hf680;
mem[474] = 32'hf700;
mem[475] = 32'hf780;
mem[476] = 32'hf800;
mem[477] = 32'hf880;
mem[478] = 32'hf900;
mem[479] = 32'hf980;
mem[480] = 32'hfa00;
mem[481] = 32'hfa80;
mem[482] = 32'hfb00;
mem[483] = 32'hfb80;
mem[484] = 32'hfc00;
mem[485] = 32'hfc80;
mem[486] = 32'hfd00;
mem[487] = 32'hfd80;
mem[488] = 32'hfe00;
mem[489] = 32'hfe80;
mem[490] = 32'hff00;
mem[491] = 32'hff80;
mem[492] = 32'h10000;
mem[493] = 32'h10080;
mem[494] = 32'h10100;
mem[495] = 32'h10180;
mem[496] = 32'h10200;
mem[497] = 32'h10280;
mem[498] = 32'h10300;
mem[499] = 32'h10380;
mem[500] = 32'h10400;
mem[501] = 32'h10480;
mem[502] = 32'h10500;
mem[503] = 32'h10580;
mem[504] = 32'h10600;
mem[505] = 32'h10680;
mem[506] = 32'h10700;
mem[507] = 32'h10780;
mem[508] = 32'h10800;
mem[509] = 32'h10880;
mem[510] = 32'h10900;
mem[511] = 32'h10980;
mem[512] = 32'h10a00;
mem[513] = 32'h10a80;
mem[514] = 32'h10b00;
mem[515] = 32'h10b80;
mem[516] = 32'h10c00;
mem[517] = 32'h10c80;
mem[518] = 32'h10d00;
mem[519] = 32'h10d80;
mem[520] = 32'h10e00;
mem[521] = 32'h10e80;
mem[522] = 32'h10f00;
mem[523] = 32'h10f80;
mem[524] = 32'h11000;
mem[525] = 32'h11080;
mem[526] = 32'h11100;
mem[527] = 32'h11180;
mem[528] = 32'h11200;
mem[529] = 32'h11280;
mem[530] = 32'h11300;
mem[531] = 32'h11380;
mem[532] = 32'h11400;
mem[533] = 32'h11480;
mem[534] = 32'h11500;
mem[535] = 32'h11580;
mem[536] = 32'h11600;
mem[537] = 32'h11680;
mem[538] = 32'h11700;
mem[539] = 32'h11780;
mem[540] = 32'h11800;
mem[541] = 32'h11880;
mem[542] = 32'h11900;
mem[543] = 32'h11980;
mem[544] = 32'h11a00;
mem[545] = 32'h11a80;
mem[546] = 32'h11b00;
mem[547] = 32'h11b80;
mem[548] = 32'h11c00;
mem[549] = 32'h11c80;
mem[550] = 32'h11d00;
mem[551] = 32'h11d80;
mem[552] = 32'h11e00;
mem[553] = 32'h11e80;
mem[554] = 32'h11f00;
mem[555] = 32'h11f80;
mem[556] = 32'h12000;
mem[557] = 32'h12080;
mem[558] = 32'h12100;
mem[559] = 32'h12180;
mem[560] = 32'h12200;
mem[561] = 32'h12280;
mem[562] = 32'h12300;
mem[563] = 32'h12380;
mem[564] = 32'h12400;
mem[565] = 32'h12480;
mem[566] = 32'h12500;
mem[567] = 32'h12580;
mem[568] = 32'h12600;
mem[569] = 32'h12680;
mem[570] = 32'h12700;
mem[571] = 32'h12780;
mem[572] = 32'h12800;
mem[573] = 32'h12880;
mem[574] = 32'h12900;
mem[575] = 32'h12980;
mem[576] = 32'h12a00;
mem[577] = 32'h12a80;
mem[578] = 32'h12b00;
mem[579] = 32'h12b80;
mem[580] = 32'h12c00;
mem[581] = 32'h12c80;
* d + b * c),
Int(S(1) / ((c + d * x ** S(2)) * sqrt(e + f * x ** S(2))), x),
x,
)
def replacement1005(c, d, e, f, x):
return Dist(
S(1) / c, Int(S(1) / (x ** S(2) * sqrt(e + f * x ** S(2))), x), x
) - Dist(d / c, Int(S(1) / ((c + d * x ** S(2)) * sqrt(e + f * x ** S(2))), x), x)
def replacement1006(a, b, c, d, e, f, x):
return Dist(
d / b, Int(sqrt(e + f * x ** S(2)) / sqrt(c + d * x ** S(2)), x), x
) + Dist(
(-a * d + b * c) / b,
Int(
sqrt(e + f * x ** S(2)) / ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2))), x
),
x,
)
def replacement1007(a, b, c, d, e, f, x):
return Dist(
d / b, Int(sqrt(e + f * x ** S(2)) / sqrt(c + d * x ** S(2)), x), x
) + Dist(
(-a * d + b * c) / b,
Int(
sqrt(e + f * x ** S(2)) / ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2))), x
),
x,
)
def replacement1008(a, b, c, d, e, f, x):
return Dist(
b / (-a * f + b * e),
Int(
sqrt(e + f * x ** S(2)) / ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2))), x
),
x,
) - Dist(
f / (-a * f + b * e),
Int(S(1) / (sqrt(c + d * x ** S(2)) * sqrt(e + f * x ** S(2))), x),
x,
)
def replacement1009(a, b, c, d, e, f, x):
return Simp(
EllipticPi(b * c / (a * d), asin(x * Rt(-d / c, S(2))), c * f / (d * e))
/ (a * sqrt(c) * sqrt(e) * Rt(-d / c, S(2))),
x,
)
def replacement1010(a, b, c, d, e, f, x):
return Dist(
sqrt(S(1) + d * x ** S(2) / c) / sqrt(c + d * x ** S(2)),
Int(
S(1)
/ (
sqrt(S(1) + d * x ** S(2) / c)
* (a + b * x ** S(2))
* sqrt(e + f * x ** S(2))
),
x,
),
x,
)
def replacement1011(a, b, c, d, e, f, x):
return Simp(
c
* sqrt(e + f * x ** S(2))
* EllipticPi(
S(1) - b * c / (a * d), ArcTan(x * Rt(d / c, S(2))), -c * f / (d * e) + S(1)
)
/ (
a
* e
* sqrt(c * (e + f * x ** S(2)) / (e * (c + d * x ** S(2))))
* sqrt(c + d * x ** S(2))
* Rt(d / c, S(2))
),
x,
)
def replacement1012(a, b, c, d, e, f, x):
return Dist(
d / b, Int(S(1) / (sqrt(c + d * x ** S(2)) * sqrt(e + f * x ** S(2))), x), x
) + Dist(
(-a * d + b * c) / b,
Int(
S(1)
/ ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2)) * sqrt(e + f * x ** S(2))),
x,
),
x,
)
def replacement1013(a, b, c, d, e, f, x):
return Dist(
b / (-a * d + b * c),
Int(
sqrt(e + f * x ** S(2)) / ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2))), x
),
x,
) - Dist(
d / (-a * d + b * c),
Int(sqrt(e + f * x ** S(2)) / (c + d * x ** S(2)) ** (S(3) / 2), x),
x,
)
def replacement1014(a, b, c, d, e, f, x):
return Dist(
(-a * f + b * e) / (-a * d + b * c),
Int(
sqrt(e + f * x ** S(2)) / ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2))), x
),
x,
) - Dist(
(-c * f + d * e) / (-a * d + b * c),
Int(sqrt(e + f * x ** S(2)) / (c + d * x ** S(2)) ** (S(3) / 2), x),
x,
)
def replacement1015(a, b, c, d, e, f, x):
return Dist(
d / b ** S(2),
Int(
sqrt(e + f * x ** S(2))
* (-a * d + S(2) * b * c + b * d * x ** S(2))
/ sqrt(c + d * x ** S(2)),
x,
),
x,
) + Dist(
(-a * d + b * c) ** S(2) / b ** S(2),
Int(
sqrt(e + f * x ** S(2)) / ((a + b * x ** S(2)) * sqrt(c + d * x ** S(2))), x
),
x,
)
def replacement1016(a, b, c, d, e, f, q, r, x):
return Dist(
b * (-a * f + b * e) / (-a * d + b * c) ** S(2),
Int(
(c + d * x ** S(2)) ** (q + S(2))
* (e + f * x ** S(2)) ** (r + S(-1))
/ (a + b * x ** S(2)),
x,
),
x,
) - Dist(
(-a * d + b * c) ** (S(-2)),
Int(
(c + d * x ** S(2)) ** q
* (e + f * x ** S(2)) ** (r + S(-1))
* (
-a * d ** S(2) * e
- b * c ** S(2) * f
+ S(2) * b * c * d * e
+ d ** S(2) * x ** S(2) * (-a * f + b * e)
),
x,
),
x,
)
def replacement1017(a, b, c, d, e, f, q, r, x):
return Dist(
d / b, Int((c + d * x ** S(2)) ** (q + S(-1)) * (e + f * x ** S(2)) ** r, x), x
) + Dist(
(-a * d + b * c) / b,
Int(
(c + d * x ** S(2)) ** (q + S(-1))
* (e + f * x ** S(2)) ** r
/ (a + b * x ** S(2)),
x,
),
x,
)
def replacement1018(a, b, c, d, e, f, q, r, x):
return Dist(
b ** S(2) / (-a * d + b * c) ** S(2),
Int(
(c + d * x ** S(2)) ** (q + S(2))
* (e + f * x ** S(2)) ** r
/ (a + b * x ** S(2)),
x,
),
x,
) - Dist(
d / (-a * d + b * c) ** S(2),
Int(
(c + d * x ** S(2)) ** q
* (e + f * x ** S(2)) ** r
* (-a * d + S(2) * b * c + b * d * x ** S(2)),
x,
),
x,
)
def replacement1019(a, b, c, d, e, f, q, r, x):
return Dist(
b / (-a * d + b * c),
Int(
(c + d * x ** S(2)) ** (q + S(1))
* (e + f * x ** S(2)) ** r
/ (a + b * x ** S(2)),
x,
),
x,
) - Dist(
d / (-a * d + b * c),
Int((c + d * x ** S(2)) ** q * (e + f * x ** S(2)) ** r, x),
x,
)
def replacement1020(a, b, c, d, e, f, x):
return (
Dist(
(-(a ** S(2)) * d * f + b ** S(2) * c * e) / (S(2) * a * b ** S(2)),
Int(
Ptr64 Void
# +0x1680 TlsLinks : _LIST_ENTRY
# +0x1690 Vdm : Ptr64 Void
# +0x1698 ReservedForNtRpc : Ptr64 Void
# +0x16a0 DbgSsReserved : [2] Ptr64 Void
# +0x16b0 HardErrorMode : Uint4B
# +0x16b8 Instrumentation : [14] Ptr64 Void
# +0x1728 SubProcessTag : Ptr64 Void
# +0x1730 EtwTraceData : Ptr64 Void
# +0x1738 WinSockData : Ptr64 Void
# +0x1740 GdiBatchCount : Uint4B
# +0x1744 InDbgPrint : UChar
# +0x1745 FreeStackOnTermination : UChar
# +0x1746 HasFiberData : UChar
# +0x1747 IdealProcessor : UChar
# +0x1748 GuaranteedStackBytes : Uint4B
# +0x1750 ReservedForPerf : Ptr64 Void
# +0x1758 ReservedForOle : Ptr64 Void
# +0x1760 WaitingOnLoaderLock : Uint4B
# +0x1768 SparePointer1 : Uint8B
# +0x1770 SoftPatchPtr1 : Uint8B
# +0x1778 SoftPatchPtr2 : Uint8B
# +0x1780 TlsExpansionSlots : Ptr64 Ptr64 Void
# +0x1788 DeallocationBStore : Ptr64 Void
# +0x1790 BStoreLimit : Ptr64 Void
# +0x1798 ImpersonationLocale : Uint4B
# +0x179c IsImpersonating : Uint4B
# +0x17a0 NlsCache : Ptr64 Void
# +0x17a8 pShimData : Ptr64 Void
# +0x17b0 HeapVirtualAffinity : Uint4B
# +0x17b8 CurrentTransactionHandle : Ptr64 Void
# +0x17c0 ActiveFrame : Ptr64 _TEB_ACTIVE_FRAME
# +0x17c8 FlsData : Ptr64 Void
# +0x17d0 SafeThunkCall : UChar
# +0x17d1 BooleanSpare : [3] UChar
class _TEB_XP_64(Structure):
_pack_ = 8
_fields_ = [
("NtTib", NT_TIB),
("EnvironmentPointer", PVOID),
("ClientId", CLIENT_ID),
("ActiveRpcHandle", PVOID),
("ThreadLocalStoragePointer", PVOID),
("ProcessEnvironmentBlock", PVOID), # PPEB
("LastErrorValue", DWORD),
("CountOfOwnedCriticalSections", DWORD),
("CsrClientThread", PVOID),
("Win32ThreadInfo", PVOID),
("User32Reserved", DWORD * 26),
("UserReserved", DWORD * 5),
("WOW32Reserved", PVOID), # ptr to wow64cpu!X86SwitchTo64BitMode
("CurrentLocale", DWORD),
("FpSoftwareStatusRegister", DWORD),
("SystemReserved1", PVOID * 54),
("ExceptionCode", SDWORD),
("ActivationContextStackPointer", PVOID), # PACTIVATION_CONTEXT_STACK
("SpareBytes1", UCHAR * 28),
("GdiTebBatch", GDI_TEB_BATCH),
("RealClientId", CLIENT_ID),
("GdiCachedProcessHandle", HANDLE),
("GdiClientPID", DWORD),
("GdiClientTID", DWORD),
("GdiThreadLocalInfo", PVOID),
("Win32ClientInfo", QWORD * 62),
("glDispatchTable", PVOID * 233),
("glReserved1", QWORD * 29),
("glReserved2", PVOID),
("glSectionInfo", PVOID),
("glSection", PVOID),
("glTable", PVOID),
("glCurrentRC", PVOID),
("glContext", PVOID),
("LastStatusValue", NTSTATUS),
("StaticUnicodeString", UNICODE_STRING),
("StaticUnicodeBuffer", WCHAR * 261),
("DeallocationStack", PVOID),
("TlsSlots", PVOID * 64),
("TlsLinks", LIST_ENTRY),
("Vdm", PVOID),
("ReservedForNtRpc", PVOID),
("DbgSsReserved", PVOID * 2),
("HardErrorMode", DWORD),
("Instrumentation", PVOID * 14),
("SubProcessTag", PVOID),
("EtwTraceData", PVOID),
("WinSockData", PVOID),
("GdiBatchCount", DWORD),
("InDbgPrint", BOOLEAN),
("FreeStackOnTermination", BOOLEAN),
("HasFiberData", BOOLEAN),
("IdealProcessor", UCHAR),
("GuaranteedStackBytes", DWORD),
("ReservedForPerf", PVOID),
("ReservedForOle", PVOID),
("WaitingOnLoaderLock", DWORD),
("SparePointer1", PVOID),
("SoftPatchPtr1", PVOID),
("SoftPatchPtr2", PVOID),
("TlsExpansionSlots", PVOID), # Ptr64 Ptr64 Void
("DeallocationBStore", PVOID),
("BStoreLimit", PVOID),
("ImpersonationLocale", DWORD),
("IsImpersonating", BOOL),
("NlsCache", PVOID),
("pShimData", PVOID),
("HeapVirtualAffinity", DWORD),
("CurrentTransactionHandle", HANDLE),
("ActiveFrame", PVOID), # PTEB_ACTIVE_FRAME
("FlsData", PVOID),
("SafeThunkCall", BOOLEAN),
("BooleanSpare", BOOLEAN * 3),
]
# +0x000 NtTib : _NT_TIB
# +0x01c EnvironmentPointer : Ptr32 Void
# +0x020 ClientId : _CLIENT_ID
# +0x028 ActiveRpcHandle : Ptr32 Void
# +0x02c ThreadLocalStoragePointer : Ptr32 Void
# +0x030 ProcessEnvironmentBlock : Ptr32 _PEB
# +0x034 LastErrorValue : Uint4B
# +0x038 CountOfOwnedCriticalSections : Uint4B
# +0x03c CsrClientThread : Ptr32 Void
# +0x040 Win32ThreadInfo : Ptr32 Void
# +0x044 User32Reserved : [26] Uint4B
# +0x0ac UserReserved : [5] Uint4B
# +0x0c0 WOW32Reserved : Ptr32 Void
# +0x0c4 CurrentLocale : Uint4B
# +0x0c8 FpSoftwareStatusRegister : Uint4B
# +0x0cc SystemReserved1 : [54] Ptr32 Void
# +0x1a4 ExceptionCode : Int4B
# +0x1a8 ActivationContextStackPointer : Ptr32 _ACTIVATION_CONTEXT_STACK
# +0x1ac SpareBytes1 : [40] UChar
# +0x1d4 GdiTebBatch : _GDI_TEB_BATCH
# +0x6b4 RealClientId : _CLIENT_ID
# +0x6bc GdiCachedProcessHandle : Ptr32 Void
# +0x6c0 GdiClientPID : Uint4B
# +0x6c4 GdiClientTID : Uint4B
# +0x6c8 GdiThreadLocalInfo : Ptr32 Void
# +0x6cc Win32ClientInfo : [62] Uint4B
# +0x7c4 glDispatchTable : [233] Ptr32 Void
# +0xb68 glReserved1 : [29] Uint4B
# +0xbdc glReserved2 : Ptr32 Void
# +0xbe0 glSectionInfo : Ptr32 Void
# +0xbe4 glSection : Ptr32 Void
# +0xbe8 glTable : Ptr32 Void
# +0xbec glCurrentRC : Ptr32 Void
# +0xbf0 glContext : Ptr32 Void
# +0xbf4 LastStatusValue : Uint4B
# +0xbf8 StaticUnicodeString : _UNICODE_STRING
# +0xc00 StaticUnicodeBuffer : [261] Uint2B
# +0xe0c DeallocationStack : Ptr32 Void
# +0xe10 TlsSlots : [64] Ptr32 Void
# +0xf10 TlsLinks : _LIST_ENTRY
# +0xf18 Vdm : Ptr32 Void
# +0xf1c ReservedForNtRpc : Ptr32 Void
# +0xf20 DbgSsReserved : [2] Ptr32 Void
# +0xf28 HardErrorMode : Uint4B
# +0xf2c Instrumentation : [14] Ptr32 Void
# +0xf64 SubProcessTag : Ptr32 Void
# +0xf68 EtwTraceData : Ptr32 Void
# +0xf6c WinSockData : Ptr32 Void
# +0xf70 GdiBatchCount : Uint4B
# +0xf74 InDbgPrint : UChar
# +0xf75 FreeStackOnTermination : UChar
# +0xf76 HasFiberData : UChar
# +0xf77 IdealProcessor : UChar
# +0xf78 GuaranteedStackBytes : Uint4B
# +0xf7c ReservedForPerf : Ptr32 Void
# +0xf80 ReservedForOle : Ptr32 Void
# +0xf84 WaitingOnLoaderLock : Uint4B
# +0xf88 SparePointer1 : Uint4B
# +0xf8c SoftPatchPtr1 : Uint4B
# +0xf90 SoftPatchPtr2 : Uint4B
# +0xf94 TlsExpansionSlots : Ptr32 Ptr32 Void
# +0xf98 ImpersonationLocale : Uint4B
# +0xf9c IsImpersonating : Uint4B
# +0xfa0 NlsCache : Ptr32 Void
# +0xfa4 pShimData : Ptr32 Void
# +0xfa8 HeapVirtualAffinity : Uint4B
# +0xfac CurrentTransactionHandle : Ptr32 Void
# +0xfb0 ActiveFrame : Ptr32 _TEB_ACTIVE_FRAME
# +0xfb4 FlsData : Ptr32 Void
# +0xfb8 SafeThunkCall : UChar
# +0xfb9 BooleanSpare : [3] UChar
class _TEB_2003(Structure):
_pack_ = 8
_fields_ = [
("NtTib", NT_TIB),
("EnvironmentPointer", PVOID),
("ClientId", CLIENT_ID),
("ActiveRpcHandle", HANDLE),
("ThreadLocalStoragePointer", PVOID),
("ProcessEnvironmentBlock", PVOID), # PPEB
("LastErrorValue", DWORD),
("CountOfOwnedCriticalSections", DWORD),
("CsrClientThread", PVOID),
("Win32ThreadInfo", PVOID),
("User32Reserved", DWORD * 26),
("UserReserved", DWORD * 5),
("WOW32Reserved", PVOID), # ptr to wow64cpu!X86SwitchTo64BitMode
("CurrentLocale", DWORD),
("FpSoftwareStatusRegister", DWORD),
("SystemReserved1", PVOID * 54),
("ExceptionCode", SDWORD),
("ActivationContextStackPointer", PVOID), # PACTIVATION_CONTEXT_STACK
("SpareBytes1", UCHAR * 40),
("GdiTebBatch", GDI_TEB_BATCH),
("RealClientId", CLIENT_ID),
("GdiCachedProcessHandle", HANDLE),
("GdiClientPID", DWORD),
("GdiClientTID", DWORD),
("GdiThreadLocalInfo", PVOID),
("Win32ClientInfo", DWORD * 62),
("glDispatchTable", PVOID * 233),
("glReserved1", DWORD * 29),
("glReserved2", PVOID),
("glSectionInfo", PVOID),
("glSection", PVOID),
("glTable", PVOID),
("glCurrentRC", PVOID),
("glContext", PVOID),
("LastStatusValue", NTSTATUS),
("StaticUnicodeString", UNICODE_STRING),
("StaticUnicodeBuffer", WCHAR * 261),
("DeallocationStack", PVOID),
("TlsSlots", PVOID * 64),
("TlsLinks", LIST_ENTRY),
("Vdm", PVOID),
("ReservedForNtRpc", PVOID),
("DbgSsReserved", PVOID * 2),
("HardErrorMode", DWORD),
("Instrumentation", PVOID * 14),
("SubProcessTag", PVOID),
("EtwTraceData", PVOID),
("WinSockData", PVOID),
("GdiBatchCount", DWORD),
("InDbgPrint", BOOLEAN),
("FreeStackOnTermination", BOOLEAN),
("HasFiberData", BOOLEAN),
("IdealProcessor", UCHAR),
("GuaranteedStackBytes", DWORD),
("ReservedForPerf", PVOID),
("ReservedForOle", PVOID),
("WaitingOnLoaderLock", DWORD),
("SparePointer1", PVOID),
("SoftPatchPtr1", PVOID),
("SoftPatchPtr2", PVOID),
("TlsExpansionSlots", PVOID), # Ptr32 Ptr32 Void
("ImpersonationLocale", DWORD),
("IsImpersonating", BOOL),
("NlsCache", PVOID),
("pShimData", PVOID),
("HeapVirtualAffinity", DWORD),
("CurrentTransactionHandle", HANDLE),
("ActiveFrame", PVOID), # PTEB_ACTIVE_FRAME
("FlsData", PVOID),
("SafeThunkCall", BOOLEAN),
("BooleanSpare", BOOLEAN * 3),
]
_TEB_2003_64 = _TEB_XP_64
_TEB_2003_R2 = _TEB_2003
_TEB_2003_R2_64 = _TEB_2003_64
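# Illustrative check (not part of the original module): the ctypes field
# offsets can be compared against the WinDbg layouts quoted in the comments,
# e.g.
#
#   >>> import ctypes
#   >>> hex(_TEB_2003.LastErrorValue.offset)    # expected 0x34 per the x86 layout
#   >>> ctypes.sizeof(_TEB_XP_64)               # total size of the 64-bit TEB
#
# Exact values depend on pointer size and the _pack_ settings above.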
# +0x000 NtTib : _NT_TIB
# +0x01c EnvironmentPointer : Ptr32 Void
# +0x020 ClientId : _CLIENT_ID
# +0x028 ActiveRpcHandle : Ptr32 Void
# +0x02c ThreadLocalStoragePointer : Ptr32 Void
# +0x030 ProcessEnvironmentBlock : Ptr32 _PEB
# +0x034 LastErrorValue : Uint4B
# +0x038 CountOfOwnedCriticalSections : Uint4B
# +0x03c CsrClientThread : Ptr32 Void
# +0x040 Win32ThreadInfo : Ptr32 Void
# +0x044 User32Reserved : [26] Uint4B
# +0x0ac UserReserved : [5] Uint4B
# +0x0c0 WOW32Reserved : Ptr32 Void
# +0x0c4 CurrentLocale : Uint4B
# +0x0c8 FpSoftwareStatusRegister : Uint4B
# +0x0cc SystemReserved1 : [54] Ptr32 Void
# +0x1a4 ExceptionCode : Int4B
# +0x1a8 ActivationContextStackPointer : Ptr32 _ACTIVATION_CONTEXT_STACK
# +0x1ac SpareBytes1 : [36] UChar
# +0x1d0 TxFsContext : Uint4B
# +0x1d4 GdiTebBatch : _GDI_TEB_BATCH
# +0x6b4 RealClientId : _CLIENT_ID
# +0x6bc GdiCachedProcessHandle : Ptr32 Void
# +0x6c0 GdiClientPID : Uint4B
# +0x6c4 GdiClientTID : Uint4B
# +0x6c8 GdiThreadLocalInfo : Ptr32 Void
# +0x6cc Win32ClientInfo : [62] Uint4B
# +0x7c4 glDispatchTable : [233] Ptr32 Void
# +0xb68 glReserved1 : [29] Uint4B
# +0xbdc glReserved2 : Ptr32 Void
# +0xbe0 glSectionInfo : Ptr32 Void
# +0xbe4 glSection : Ptr32 Void
# +0xbe8 glTable : Ptr32 Void
# +0xbec glCurrentRC : Ptr32 Void
# +0xbf0 glContext : Ptr32 Void
# +0xbf4 LastStatusValue : Uint4B
# +0xbf8 StaticUnicodeString : _UNICODE_STRING
# +0xc00 StaticUnicodeBuffer : [261] Wchar
# +0xe0c DeallocationStack : Ptr32 Void
# +0xe10 TlsSlots : [64] Ptr32 Void
# +0xf10 TlsLinks : _LIST_ENTRY
# +0xf18 Vdm : Ptr32 Void
# +0xf1c ReservedForNtRpc : Ptr32 Void
# +0xf20 DbgSsReserved : [2] Ptr32 Void
# +0xf28 HardErrorMode : Uint4B
# +0xf2c Instrumentation : [9] Ptr32 Void
# +0xf50 ActivityId : _GUID
# +0xf60 SubProcessTag : Ptr32 Void
# +0xf64 EtwLocalData : Ptr32 Void
# +0xf68 EtwTraceData : Ptr32 Void
# +0xf6c WinSockData : Ptr32 Void
# +0xf70 GdiBatchCount : Uint4B
# +0xf74 SpareBool0 : UChar
# +0xf75 SpareBool1 : UChar
# +0xf76 SpareBool2 : UChar
# +0xf77 IdealProcessor : UChar
# +0xf78 GuaranteedStackBytes : Uint4B
# +0xf7c ReservedForPerf : Ptr32 Void
# +0xf80 ReservedForOle : Ptr32 Void
# +0xf84 WaitingOnLoaderLock : Uint4B
# +0xf88 SavedPriorityState : Ptr32 Void
# +0xf8c SoftPatchPtr1 : Uint4B
# +0xf90 ThreadPoolData : Ptr32 Void
# +0xf94 TlsExpansionSlots : Ptr32 Ptr32 Void
# +0xf98 ImpersonationLocale : Uint4B
# +0xf9c IsImpersonating : Uint4B
# +0xfa0 NlsCache : Ptr32 Void
# +0xfa4 pShimData : Ptr32 Void
# +0xfa8 HeapVirtualAffinity : Uint4B
# +0xfac CurrentTransactionHandle : Ptr32 Void
# +0xfb0 ActiveFrame : Ptr32 _TEB_ACTIVE_FRAME
# +0xfb4 FlsData : Ptr32 Void
# +0xfb8 PreferredLanguages : Ptr32 Void
# +0xfbc UserPrefLanguages : Ptr32 Void
# +0xfc0 MergedPrefLanguages : Ptr32 Void
# +0xfc4 MuiImpersonation : Uint4B
# +0xfc8 CrossTebFlags : Uint2B
# +0xfc8 SpareCrossTebBits : Pos 0, 16 Bits
# +0xfca SameTebFlags : Uint2B
# +0xfca DbgSafeThunkCall : Pos 0, 1 Bit
# +0xfca DbgInDebugPrint : Pos 1, 1 Bit
# +0xfca DbgHasFiberData : Pos 2, 1 Bit
# +0xfca DbgSkipThreadAttach : Pos 3, 1 Bit
# +0xfca DbgWerInShipAssertCode : Pos 4, |